From 4f18b9a6711adbc7c76993c734a94ee3f5c61791 Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Thu, 13 Apr 2023 16:22:53 +0200 Subject: drm/amdgpu: Add support for querying the max ibs in a submission. (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This info would be used by radv to figure out when we need to split a submission into multiple submissions. radv currently has a limit of 192 which seems to work for most gfx submissions, but is way too high for e.g. compute or sdma. Userspace is available at https://gitlab.freedesktop.org/bnieuwenhuizen/mesa/-/commits/ib-rejection-v3 v3: Completely rewrote based on suggestion of making it a separate query. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2498 Reviewed-by: Christian König Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index b6eb90df5d05..6981e59a9401 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -876,6 +876,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_VIDEO_CAPS_DECODE 0 /* Subquery id: Encode */ #define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1 +/* Query the max number of IBs per gang per submission */ +#define AMDGPU_INFO_MAX_IBS 0x22 #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff -- cgit v1.2.3 From 043dc33f443fd7abaf3fe076897503ce3d5dbc26 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 13:48:11 -0500 Subject: drm/amdgpu/UAPI: add new CS chunk for GFX shadow buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For GFX11, the UMD needs to allocate some shadow buffers to be used for preemption. The UMD allocates the buffers and passes the GPU virtual address to the kernel since the kernel will program the packet that specifies these addresses as part of its IB submission frame. v2: UMD passes shadow init to tell the kernel when to initialize the shadow Reviewed-by: Christian König Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 6981e59a9401..fc094653b13f 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -592,6 +592,7 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08 #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09 +#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -708,6 +709,15 @@ struct drm_amdgpu_cs_chunk_data { }; }; +#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1 + +struct drm_amdgpu_cs_chunk_cp_gfx_shadow { + __u64 shadow_va; + __u64 csa_va; + __u64 gds_va; + __u64 flags; +}; + /* * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU * -- cgit v1.2.3 From edd9038000352ba846cba9dfb84d8c397c3b6499 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 9 Mar 2023 13:43:13 -0500 Subject: drm/amdgpu: add UAPI to query GFX shadow sizes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add UAPI to query the GFX shadow buffer requirements for preemption on GFX11. UMDs need to specify the shadow areas for preemption.
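Editor's note: a minimal userspace sketch of how a UMD might consume this query through the existing AMDGPU_INFO ioctl; the helper name is hypothetical, error handling is trimmed, and fd is assumed to be an open render node:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    /* Hypothetical helper: fetch the device info block that carries the
     * gfx11 shadow/CSA sizes and alignments added in the hunk below. */
    static int query_gfx_shadow_info(int fd, struct drm_amdgpu_info_device *dev_info)
    {
        struct drm_amdgpu_info request;

        memset(&request, 0, sizeof(request));
        /* Zero the output so the new fields read as 0 on older kernels. */
        memset(dev_info, 0, sizeof(*dev_info));
        request.return_pointer = (uintptr_t)dev_info;
        request.return_size = sizeof(*dev_info);
        request.query = AMDGPU_INFO_DEV_INFO;

        return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
    }

On success the UMD would allocate a shadow buffer of shadow_size bytes at a GPU virtual address aligned to shadow_alignment (and likewise csa_size/csa_alignment), then pass the resulting VAs through the AMDGPU_CHUNK_ID_CP_GFX_SHADOW chunk added by the previous patch.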
v2: move into existing asic info query; drop GDS as its use is determined by the UMD (Marek) v3: Update comments to note that alignment is base virtual alignment (Alex) Reviewed-by: Marek Olšák Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index fc094653b13f..cc78528c3b4b 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -1138,6 +1138,14 @@ struct drm_amdgpu_info_device { __u64 mall_size; /* AKA infinity cache */ /* high 32 bits of the rb pipes mask */ __u32 enabled_rb_pipes_mask_hi; + /* shadow area size for gfx11 */ + __u32 shadow_size; + /* shadow area base virtual alignment for gfx11 */ + __u32 shadow_alignment; + /* context save area size for gfx11 */ + __u32 csa_size; + /* context save area base virtual alignment for gfx11 */ + __u32 csa_alignment; }; struct drm_amdgpu_info_hw_ip { -- cgit v1.2.3 From 99e7e3b60080d913ff2f94943f4af1f1b76a1c06 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 12 Apr 2023 01:29:26 +0300 Subject: drm/uapi: Document CTM matrix better MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document in which order the CTM matrix elements are stored. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20230411222931.15127-2-ville.syrjala@linux.intel.com Reviewed-by: Xaver Hugl Acked-by: Simon Ser --- include/uapi/drm/drm_mode.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index 46becedf5b2f..43691058d28f 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -834,6 +834,11 @@ struct drm_color_ctm { /* * Conversion matrix in S31.32 sign-magnitude * (not two's complement!) format. + * + * out matrix in + * |R| |0 1 2| |R| + * |G| = |3 4 5| x |G| + * |B| |6 7 8| |B| */ __u64 matrix[9]; }; -- cgit v1.2.3 From 99afb7cc8c44578615200ea4806b183e1e35a81d Mon Sep 17 00:00:00 2001 From: Alan Previn Date: Thu, 11 May 2023 16:17:35 -0700 Subject: drm/i915/pxp: Add ARB session creation and cleanup Add MTL's function for ARB session creation using the PXP firmware version 4.3 ABI structure format. While revisiting the ARB session creation flow in intel_pxp_start, let's address the missing UAPI documentation. Without actually changing backward compatible behavior, update i915's drm-uapi comments that describe the possible error values when creating a context with I915_CONTEXT_PARAM_PROTECTED_CONTENT: Since the first merge of PXP support on ADL, i915 returns -ENXIO if a dependency such as firmware or a component driver was yet to be loaded, or returns -EIO if the creation attempt failed when requested by the PXP firmware (specific firmware error responses are reported in dmesg). Add MTL's function for ARB session invalidation; this reuses the PXP firmware version 4.2 ABI structure format. For both cases, the back-end gsccs functions that send messages to the firmware inspect the GSC-CS-Mem-Header's pending bit; when set, it means the GSC firmware is busy and we should retry. Given the latter hw requirement, let's also update the functions in the front-end layer that wait for session creation or teardown completion to use new worst-case timeout periods.
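Editor's note: a quick sketch of the retry pattern just described, under stated assumptions: send_one_message() is a hypothetical stand-in for the real gsccs back-end helper, and -EAGAIN stands for "pending bit still set". The worst case follows from the retry budget in the gsccs.h hunk further down: GSC_PENDING_RETRY_MAXCOUNT (40) retries of GSC_PENDING_RETRY_PAUSE_MS (50 ms) each, i.e. a 2000 ms round trip, which is what GSCFW_MAX_ROUND_TRIP_LATENCY_MS encodes and why the 250 ms front-end waits are raised.

    static int send_message_retry_complete(struct intel_pxp *pxp,
                                           void *msg_in, size_t msg_in_size,
                                           void *msg_out, size_t msg_out_size)
    {
        int ret, tries = 0;

        do {
            /* hypothetical stand-in for the real back-end send */
            ret = send_one_message(pxp, msg_in, msg_in_size,
                                   msg_out, msg_out_size);
            if (ret != -EAGAIN) /* completed, or a real failure */
                return ret;
            msleep(GSC_PENDING_RETRY_PAUSE_MS);
        } while (++tries < GSC_PENDING_RETRY_MAXCOUNT);

        return -ETIMEDOUT; /* firmware stayed busy for the whole budget */
    }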
Signed-off-by: Alan Previn Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Radhakrishna Sripada Link: https://patchwork.freedesktop.org/patch/msgid/20230511231738.1077674-6-alan.previn.teres.alexis@intel.com --- drivers/gpu/drm/i915/pxp/intel_pxp.c | 27 ++++- .../gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h | 21 ++++ drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c | 130 +++++++++++++++++++++ drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h | 12 +- drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 11 +- include/uapi/drm/i915_drm.h | 15 +++ 6 files changed, 209 insertions(+), 7 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 8949d4be7882..b600d68de2a4 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -291,6 +291,8 @@ static bool pxp_component_bound(struct intel_pxp *pxp) static int __pxp_global_teardown_final(struct intel_pxp *pxp) { + int timeout; + if (!pxp->arb_is_valid) return 0; /* @@ -300,7 +302,12 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp) intel_pxp_mark_termination_in_progress(pxp); intel_pxp_terminate(pxp, false); - if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) + if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) + timeout = GSCFW_MAX_ROUND_TRIP_LATENCY_MS; + else + timeout = 250; + + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) return -ETIMEDOUT; return 0; @@ -308,6 +315,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp) static int __pxp_global_teardown_restart(struct intel_pxp *pxp) { + int timeout; + if (pxp->arb_is_valid) return 0; /* @@ -316,7 +325,12 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp) */ pxp_queue_termination(pxp); - if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) + if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) + timeout = GSCFW_MAX_ROUND_TRIP_LATENCY_MS; + else + timeout = 250; + + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) return -ETIMEDOUT; return 0; @@ -354,8 +368,13 @@ int intel_pxp_start(struct intel_pxp *pxp) if (!intel_pxp_is_enabled(pxp)) return -ENODEV; - if (wait_for(pxp_component_bound(pxp), 250)) - return -ENXIO; + if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) { + if (wait_for(intel_pxp_gsccs_is_ready_for_sessions(pxp), 250)) + return -ENXIO; + } else { + if (wait_for(pxp_component_bound(pxp), 250)) + return -ENXIO; + } mutex_lock(&pxp->arb_mutex); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h index c65ada99e54f..09777719cd84 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_43.h @@ -11,6 +11,7 @@ /* PXP-Cmd-Op definitions */ #define PXP43_CMDID_START_HUC_AUTH 0x0000003A +#define PXP43_CMDID_INIT_SESSION 0x00000036 /* PXP-Packet sizes for MTL's GSCCS-HECI instruction */ #define PXP43_MAX_HECI_INOUT_SIZE (SZ_32K) @@ -26,4 +27,24 @@ struct pxp43_start_huc_auth_out { struct pxp_cmd_header header; } __packed; +/* PXP-Input-Packet: Init PXP session */ +struct pxp43_create_arb_in { + struct pxp_cmd_header header; + /* header.stream_id fields for version 4.3 of Init PXP session: */ + #define PXP43_INIT_SESSION_VALID BIT(0) + #define PXP43_INIT_SESSION_APPTYPE BIT(1) + #define PXP43_INIT_SESSION_APPID GENMASK(17, 2) + u32 protection_mode; + #define PXP43_INIT_SESSION_PROTECTION_ARB 0x2 + u32 sub_session_id; + u32 init_flags; + u32 rsvd[12]; +} 
__packed; + +/* PXP-Output-Packet: Init PXP session */ +struct pxp43_create_arb_out { + struct pxp_cmd_header header; + u32 rsvd[8]; +} __packed; + #endif /* __INTEL_PXP_FW_INTERFACE_43_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c index 16e3b73d0653..4bc276daca16 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c @@ -6,13 +6,46 @@ #include "gem/i915_gem_internal.h" #include "gt/intel_context.h" +#include "gt/uc/intel_gsc_fw.h" #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" #include "i915_drv.h" +#include "intel_pxp_cmd_interface_42.h" #include "intel_pxp_cmd_interface_43.h" #include "intel_pxp_gsccs.h" #include "intel_pxp_types.h" +static bool +is_fw_err_platform_config(u32 type) +{ + switch (type) { + case PXP_STATUS_ERROR_API_VERSION: + case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: + case PXP_STATUS_PLATFCONFIG_KF1_BAD: + return true; + default: + break; + } + return false; +} + +static const char * +fw_err_to_string(u32 type) +{ + switch (type) { + case PXP_STATUS_ERROR_API_VERSION: + return "ERR_API_VERSION"; + case PXP_STATUS_NOT_READY: + return "ERR_NOT_READY"; + case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: + case PXP_STATUS_PLATFCONFIG_KF1_BAD: + return "ERR_PLATFORM_CONFIG"; + default: + break; + } + return NULL; +} + static int gsccs_send_message(struct intel_pxp *pxp, void *msg_in, size_t msg_in_size, @@ -152,6 +185,103 @@ gsccs_send_message_retry_complete(struct intel_pxp *pxp, return ret; } +bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) +{ + /* + * GSC-fw loading, HuC-fw loading, HuC-fw authentication and + * GSC-proxy init flow (requiring an mei component driver) + * must all occur first before we can start requesting PXP + * sessions. Checking for completion on HuC authentication and + * gsc-proxy init flow (the last set of dependencies that + * are out of order) will suffice. 
+ */ + if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc) && + intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc)) + return true; + + return false; +} + +int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, + int arb_session_id) +{ + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; + struct pxp43_create_arb_in msg_in = {0}; + struct pxp43_create_arb_out msg_out = {0}; + int ret; + + msg_in.header.api_version = PXP_APIVER(4, 3); + msg_in.header.command_id = PXP43_CMDID_INIT_SESSION; + msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, arb_session_id) | + FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) | + FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0)); + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); + msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB; + + ret = gsccs_send_message_retry_complete(pxp, + &msg_in, sizeof(msg_in), + &msg_out, sizeof(msg_out), NULL); + if (ret) { + drm_err(&i915->drm, "Failed to init session %d, ret=[%d]\n", arb_session_id, ret); + } else if (msg_out.header.status != 0) { + if (is_fw_err_platform_config(msg_out.header.status)) { + drm_info_once(&i915->drm, + "PXP init-session-%d failed due to BIOS/SOC:0x%08x:%s\n", + arb_session_id, msg_out.header.status, + fw_err_to_string(msg_out.header.status)); + } else { + drm_dbg(&i915->drm, "PXP init-session-%d failed 0x%08x:%s:\n", + arb_session_id, msg_out.header.status, + fw_err_to_string(msg_out.header.status)); + drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", + msg_in.header.command_id, msg_in.header.api_version); + } + } + + return ret; +} + +void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id) +{ + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; + struct pxp42_inv_stream_key_in msg_in = {0}; + struct pxp42_inv_stream_key_out msg_out = {0}; + int ret = 0; + + /* + * Stream key invalidation reuses the same version 4.2 input/output + * command format but firmware requires 4.3 API interaction + */ + msg_in.header.api_version = PXP_APIVER(4, 3); + msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY; + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); + + msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1); + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0); + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id); + + ret = gsccs_send_message_retry_complete(pxp, + &msg_in, sizeof(msg_in), + &msg_out, sizeof(msg_out), NULL); + if (ret) { + drm_err(&i915->drm, "Failed to inv-stream-key-%u, ret=[%d]\n", + session_id, ret); + } else if (msg_out.header.status != 0) { + if (is_fw_err_platform_config(msg_out.header.status)) { + drm_info_once(&i915->drm, + "PXP inv-stream-key-%u failed due to BIOS/SOC:0x%08x:%s\n", + session_id, msg_out.header.status, + fw_err_to_string(msg_out.header.status)); + } else { + drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n", + session_id, msg_out.header.status, + fw_err_to_string(msg_out.header.status)); + drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", + msg_in.header.command_id, msg_in.header.api_version); + } + } +} + static void gsccs_cleanup_fw_host_session_handle(struct intel_pxp *pxp) { diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h index bd1c028bc80f..298ad38e6c7d 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h @@ -10,14 +10,22 @@ struct intel_pxp; -#define 
GSC_REPLY_LATENCY_MS 200 +#define GSC_REPLY_LATENCY_MS 210 +/* + * Max FW response time is 200ms, to which we add 10ms to account for overhead + * such as request preparation, GuC submission to hw and pipeline completion times. + */ #define GSC_PENDING_RETRY_MAXCOUNT 40 #define GSC_PENDING_RETRY_PAUSE_MS 50 +#define GSCFW_MAX_ROUND_TRIP_LATENCY_MS (GSC_PENDING_RETRY_MAXCOUNT * GSC_PENDING_RETRY_PAUSE_MS) #ifdef CONFIG_DRM_I915_PXP void intel_pxp_gsccs_fini(struct intel_pxp *pxp); int intel_pxp_gsccs_init(struct intel_pxp *pxp); +int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id); +void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); + #else static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp) { @@ -30,4 +38,6 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp) #endif +bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); + #endif /*__INTEL_PXP_GSCCS_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index 7899079e17b0..e4d8242302c5 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -7,6 +7,7 @@ #include "intel_pxp.h" #include "intel_pxp_cmd.h" +#include "intel_pxp_gsccs.h" #include "intel_pxp_session.h" #include "intel_pxp_tee.h" #include "intel_pxp_types.h" @@ -62,7 +63,10 @@ static int pxp_create_arb_session(struct intel_pxp *pxp) return -EEXIST; } - ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION); + if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) + ret = intel_pxp_gsccs_create_session(pxp, ARB_SESSION); + else + ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION); if (ret) { drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n"); return ret; } @@ -106,7 +110,10 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp) intel_uncore_write(gt->uncore, KCR_GLOBAL_TERMINATE(pxp->kcr_base), 1); - intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION); + if (HAS_ENGINE(gt, GSC0)) + intel_pxp_gsccs_end_arb_fw_session(pxp, ARB_SESSION); + else + intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION); return ret; } diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index dba7c5a5b25e..0aa3190e7654 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -2096,6 +2096,21 @@ struct drm_i915_gem_context_param { * * -ENODEV: feature not available * -EPERM: trying to mark a recoverable or not bannable context as protected + * -ENXIO: A dependency such as a component driver or firmware is not yet + * loaded so user space may need to attempt again. Depending on the + * device, this error may be reported if protected context creation is + * attempted very early after kernel start because the internal timeout + * waiting for such dependencies is not guaranteed to be larger than + * required (numbers differ depending on system and kernel config): + * - ADL/RPL: dependencies may take up to 3 seconds from kernel start + * while context creation internal timeout is 250 milliseconds + * - MTL: dependencies may take up to 8 seconds from kernel start + * while context creation internal timeout is 250 milliseconds + * NOTE: such dependencies happen once, so a subsequent call to create a + * protected context after a prior successful call will not experience + * such timeouts and will not return -ENXIO (unless the driver is reloaded, + * or, depending on the device, resumes from a suspended state). 
+ * -EIO: The firmware did not succeed in creating the protected context. */ #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd /* Must be kept compact -- no holes and well documented */ -- cgit v1.2.3 From d1da138f245d4fb46b21d2ddb19504a2831d813f Mon Sep 17 00:00:00 2001 From: Alan Previn Date: Thu, 11 May 2023 16:17:36 -0700 Subject: drm/i915/uapi/pxp: Add a GET_PARAM for PXP Because of the additional firmware, component-driver and initialization dependencies required on the MTL platform before a PXP context can be created, a UMD calling for PXP creation as a way to get caps can take a long time. An actual real-world customer stack has seen this happen in the 4-to-8 second range after the kernel starts (which sees MESA's init appear in the middle of this range as the compositor comes up). To avoid unnecessary delays experienced by the UMD for get-caps purposes, add a GET_PARAM for I915_PARAM_PXP_STATUS. However, some failures can still occur after all the dependencies are met (such as firmware init flow failure, bios configurations or SOC fusing not allowing PXP enablement). Those scenarios will only be known to user space when it attempts creating a PXP context; this is documented in the GEM UAPI headers. While making this change, create a helper that is common to both the GET_PARAM caller and intel_pxp_start since the latter does similar checks. Signed-off-by: Alan Previn Reviewed-by: Daniele Ceraolo Spurio Acked-by: Jordan Justen Signed-off-by: Radhakrishna Sripada Link: https://patchwork.freedesktop.org/patch/msgid/20230511231738.1077674-7-alan.previn.teres.alexis@intel.com --- drivers/gpu/drm/i915/i915_getparam.c | 7 +++++++ drivers/gpu/drm/i915/pxp/intel_pxp.c | 29 ++++++++++++++++++++++------- drivers/gpu/drm/i915/pxp/intel_pxp.h | 1 + include/uapi/drm/i915_drm.h | 19 +++++++++++++++++++ 4 files changed, 49 insertions(+), 7 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 2238e096c957..6f11d7eaa91a 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -5,6 +5,8 @@ #include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" +#include "pxp/intel_pxp.h" + #include "i915_cmd_parser.h" #include "i915_drv.h" #include "i915_getparam.h" @@ -102,6 +104,11 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, if (value < 0) return value; break; + case I915_PARAM_PXP_STATUS: + value = intel_pxp_get_readiness_status(i915->pxp); + if (value < 0) + return value; + break; case I915_PARAM_MMAP_GTT_VERSION: /* Though we've started our numbering from 1, and so class all * earlier versions as 0, in effect their value is undefined as diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index b600d68de2a4..f143eadbc253 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -358,23 +358,38 @@ void intel_pxp_end(struct intel_pxp *pxp) } /* - * the arb session is restarted from the irq work when we receive the - * termination completion interrupt + * this helper is used by both intel_pxp_start and by + * the GET_PARAM IOCTL that user space calls. Thus, the + * return values here should match the UAPI spec. 
*/ -int intel_pxp_start(struct intel_pxp *pxp) +int intel_pxp_get_readiness_status(struct intel_pxp *pxp) { - int ret = 0; - if (!intel_pxp_is_enabled(pxp)) return -ENODEV; if (HAS_ENGINE(pxp->ctrl_gt, GSC0)) { if (wait_for(intel_pxp_gsccs_is_ready_for_sessions(pxp), 250)) - return -ENXIO; + return 2; } else { if (wait_for(pxp_component_bound(pxp), 250)) - return -ENXIO; + return 2; } + return 1; +} + +/* + * the arb session is restarted from the irq work when we receive the + * termination completion interrupt + */ +int intel_pxp_start(struct intel_pxp *pxp) +{ + int ret = 0; + + ret = intel_pxp_get_readiness_status(pxp); + if (ret < 0) + return ret; + else if (ret > 1) + return -EIO; /* per UAPI spec, user may retry later */ mutex_lock(&pxp->arb_mutex); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index 3ded0890cd27..17e6dbc86b38 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -26,6 +26,7 @@ void intel_pxp_fini_hw(struct intel_pxp *pxp); void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp); void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); +int intel_pxp_get_readiness_status(struct intel_pxp *pxp); int intel_pxp_start(struct intel_pxp *pxp); void intel_pxp_end(struct intel_pxp *pxp); diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 0aa3190e7654..ba40855dbc93 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -771,6 +771,25 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57 +/* + * Query the status of PXP support in i915. + * + * The query can fail in the following scenarios with the listed error codes: + * -ENODEV = PXP support is not available on the GPU device or in the + * kernel due to missing component drivers or kernel configs. + * + * If the IOCTL is successful, the returned parameter will be set to one of + * the following values: + * 1 = PXP feature is supported and is ready for use. + * 2 = PXP feature is supported but should be ready soon (pending + * initialization of non-i915 system dependencies). + * + * NOTE: When param is supported (positive return values), user space should + * still refer to the GEM PXP context-creation UAPI header specs to be + * aware of possible failure due to system state machine at the time. 
+ */ +#define I915_PARAM_PXP_STATUS 58 + /* Must be kept compact -- no holes and well documented */ /** -- cgit v1.2.3 From c7c12de893f808bd7c1215fe9056262295e5203b Mon Sep 17 00:00:00 2001 From: Juha-Pekka Heikkila Date: Sun, 14 May 2023 21:42:39 +0300 Subject: drm/fourcc: define Intel Meteorlake related ccs modifiers Add Tile4 type ccs modifiers with aux buffer needed for MTL Bspec: 49251, 49252, 49253 Cc: dri-devel@lists.freedesktop.org Cc: Jani Nikula Signed-off-by: Juha-Pekka Heikkila Reviewed-by: Matt Atwood Acked-by: Thomas Zimmermann Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20230514184240.6184-1-juhapekka.heikkila@gmail.com --- include/uapi/drm/drm_fourcc.h | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index de703c6be969..8db7fd3f743e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -657,6 +657,49 @@ extern "C" { */ #define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12) +/* + * Intel Color Control Surfaces (CCS) for display ver. 14 render compression. + * + * The main surface is tile4 and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * tile4 widths. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13) + +/* + * Intel Color Control Surfaces (CCS) for display ver. 14 media compression + * + * The main surface is tile4 and at plane index 0, the CCS is linear and + * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in + * main surface. In other words, 4 bits in CCS map to a main surface cache + * line pair. The main surface pitch is required to be a multiple of four + * tile4 widths. For semi-planar formats like NV12, CCS planes follow the + * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces, + * planes 2 and 3 for the respective CCS. + */ +#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14) + +/* + * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render + * compression. + * + * The main surface is tile4 and is at plane index 0 whereas CCS is linear + * and at index 1. The clear color is stored at index 2, and the pitch should + * be ignored. The clear color structure is 256 bits. The first 128 bits + * represents Raw Clear Color Red, Green, Blue and Alpha color each represented + * by 32 bits. The raw clear color is consumed by the 3d engine and generates + * the converted clear color of size 64 bits. The first 32 bits store the Lower + * Converted Clear Color value and the next 32 bits store the Higher Converted + * Clear Color value when applicable. The Converted Clear Color values are + * consumed by the DE. The last 64 bits are used to store Color Discard Enable + * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line + * corresponds to an area of 4x1 tiles in the main surface. The main surface + * pitch is required to be a multiple of 4 tile widths. 
+ */ +#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15) + /* * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks * -- cgit v1.2.3 From bc4be0a38b63b6d4d00a58b10e164f56049be2c2 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 19 May 2023 08:49:45 -0700 Subject: drm/i915/pmu: Prepare for multi-tile non-engine counters Reserve some bits in the counter config namespace which will carry the tile id and prepare the code to handle this. No per tile counters have been added yet. v2: - Fix checkpatch issues - Use 4 bits for gt id in non-engine counters. Drop FIXME. - Set MAX GTs to 4. Drop FIXME. v3: (Ashutosh, Tvrtko) - Drop BUG_ON that would never fire - Make enable u64 - Pull in some code from next patch v4: Set I915_PMU_MAX_GTS to 2 (Tvrtko) v5: s/u64/u32 where needed (Ashutosh) Signed-off-by: Tvrtko Ursulin Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/20230519154946.3751971-7-umesh.nerlige.ramappa@intel.com --- drivers/gpu/drm/i915/i915_pmu.c | 146 +++++++++++++++++++++++++++++----------- drivers/gpu/drm/i915/i915_pmu.h | 9 ++- include/uapi/drm/i915_drm.h | 17 ++++- 3 files changed, 127 insertions(+), 45 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index ecb57a94143e..5cfc322e69b4 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -56,11 +56,21 @@ static bool is_engine_config(const u64 config) return config < __I915_PMU_OTHER(0); } +static unsigned int config_gt_id(const u64 config) +{ + return config >> __I915_PMU_GT_SHIFT; +} + +static u64 config_counter(const u64 config) +{ + return config & ~(~0ULL << __I915_PMU_GT_SHIFT); +} + static unsigned int other_bit(const u64 config) { unsigned int val; - switch (config) { + switch (config_counter(config)) { case I915_PMU_ACTUAL_FREQUENCY: val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED; break; @@ -78,7 +88,9 @@ static unsigned int other_bit(const u64 config) return -1; } - return I915_ENGINE_SAMPLE_COUNT + val; + return I915_ENGINE_SAMPLE_COUNT + + config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT + + val; } static unsigned int config_bit(const u64 config) @@ -115,6 +127,18 @@ static unsigned int event_bit(struct perf_event *event) return config_bit(event->attr.config); } +static u32 frequency_enabled_mask(void) +{ + unsigned int i; + u32 mask = 0; + + for (i = 0; i < I915_PMU_MAX_GTS; i++) + mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) | + config_mask(__I915_PMU_REQUESTED_FREQUENCY(i)); + + return mask; +} + static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) { struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); @@ -131,9 +155,7 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) * Mask out all the ones which do not need the timer, or in * other words keep all the ones that could need the timer. 
 */ - enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) | - config_mask(I915_PMU_REQUESTED_FREQUENCY) | - ENGINE_SAMPLE_MASK; + enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK; /* * When the GPU is idle per-engine counters do not need to be @@ -175,9 +197,37 @@ static inline s64 ktime_since_raw(const ktime_t kt) return ktime_to_ns(ktime_sub(ktime_get_raw(), kt)); } +static unsigned int +__sample_idx(struct i915_pmu *pmu, unsigned int gt_id, int sample) +{ + unsigned int idx = gt_id * __I915_NUM_PMU_SAMPLERS + sample; + + GEM_BUG_ON(idx >= ARRAY_SIZE(pmu->sample)); + + return idx; +} + +static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample) +{ + return pmu->sample[__sample_idx(pmu, gt_id, sample)].cur; +} + +static void +store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val) +{ + pmu->sample[__sample_idx(pmu, gt_id, sample)].cur = val; +} + +static void +add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul) +{ + pmu->sample[__sample_idx(pmu, gt_id, sample)].cur += mul_u32_u32(val, mul); +} + static u64 get_rc6(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; + const unsigned int gt_id = gt->info.id; struct i915_pmu *pmu = &i915->pmu; unsigned long flags; bool awake = false; @@ -192,7 +242,7 @@ static u64 get_rc6(struct intel_gt *gt) spin_lock_irqsave(&pmu->lock, flags); if (awake) { - pmu->sample[__I915_SAMPLE_RC6].cur = val; + store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val); } else { /* * We think we are runtime suspended. * * on top of the last known real value, as the approximated RC6 * counter value. */ - val = ktime_since_raw(pmu->sleep_last); - val += pmu->sample[__I915_SAMPLE_RC6].cur; + val = ktime_since_raw(pmu->sleep_last[gt_id]); + val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6); } - if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur) - val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur; else - pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val; + if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED)) + val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED); else + store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val); spin_unlock_irqrestore(&pmu->lock, flags); @@ -218,13 +268,20 @@ static u64 get_rc6(struct intel_gt *gt) static void init_rc6(struct i915_pmu *pmu) { struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); - intel_wakeref_t wakeref; + struct intel_gt *gt; + unsigned int i; + + for_each_gt(gt, i915, i) { + intel_wakeref_t wakeref; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) { + u64 val = __get_rc6(gt); - with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) { - pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915)); - pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = - pmu->sample[__I915_SAMPLE_RC6].cur; - pmu->sleep_last = ktime_get_raw(); + store_sample(pmu, i, __I915_SAMPLE_RC6, val); + store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED, + val); + pmu->sleep_last[i] = ktime_get_raw(); + } } } @@ -232,8 +289,8 @@ static void park_rc6(struct intel_gt *gt) { struct i915_pmu *pmu = &gt->i915->pmu; - pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(gt); - pmu->sleep_last = ktime_get_raw(); + store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt)); + pmu->sleep_last[gt->info.id] = ktime_get_raw(); } static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) @@ -373,34 +430,30 @@ engines_sample(struct intel_gt *gt, unsigned int 
period_ns) } } -static void -add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) -{ - sample->cur += mul_u32_u32(val, mul); -} - -static bool frequency_sampling_enabled(struct i915_pmu *pmu) +static bool +frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt) { return pmu->enable & - (config_mask(I915_PMU_ACTUAL_FREQUENCY) | - config_mask(I915_PMU_REQUESTED_FREQUENCY)); + (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) | + config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt))); } static void frequency_sample(struct intel_gt *gt, unsigned int period_ns) { struct drm_i915_private *i915 = gt->i915; + const unsigned int gt_id = gt->info.id; struct i915_pmu *pmu = &i915->pmu; struct intel_rps *rps = &gt->rps; - if (!frequency_sampling_enabled(pmu)) + if (!frequency_sampling_enabled(pmu, gt_id)) return; /* Report 0/0 (actual/requested) frequency while parked. */ if (!intel_gt_pm_get_if_awake(gt)) return; - if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) { + if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) { u32 val; /* @@ -416,12 +469,12 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) if (!val) val = intel_gpu_freq(rps, rps->cur_freq); - add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], + add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT, val, period_ns / 1000); } - if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) { - add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], + if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) { + add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ, intel_rps_get_requested_frequency(rps), period_ns / 1000); } @@ -458,9 +511,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) continue; engines_sample(gt, period_ns); - - if (i == 0) /* FIXME */ - frequency_sample(gt, period_ns); + frequency_sample(gt, period_ns); } hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); @@ -502,7 +553,13 @@ config_status(struct drm_i915_private *i915, u64 config) { struct intel_gt *gt = to_gt(i915); - switch (config) { + unsigned int gt_id = config_gt_id(config); + unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0; + + if (gt_id > max_gt_id) + return -ENOENT; + + switch (config_counter(config)) { case I915_PMU_ACTUAL_FREQUENCY: if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) /* Requires a mutex for sampling! 
*/ @@ -513,6 +570,8 @@ config_status(struct drm_i915_private *i915, u64 config) return -ENODEV; break; case I915_PMU_INTERRUPTS: + if (gt_id) + return -ENOENT; break; case I915_PMU_RC6_RESIDENCY: if (!gt->rc6.supported) @@ -610,22 +669,27 @@ static u64 __i915_pmu_event_read(struct perf_event *event) val = engine->pmu.sample[sample].cur; } } else { - switch (event->attr.config) { + const unsigned int gt_id = config_gt_id(event->attr.config); + const u64 config = config_counter(event->attr.config); + + switch (config) { case I915_PMU_ACTUAL_FREQUENCY: val = - div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, + div_u64(read_sample(pmu, gt_id, + __I915_SAMPLE_FREQ_ACT), USEC_PER_SEC /* to MHz */); break; case I915_PMU_REQUESTED_FREQUENCY: val = - div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, + div_u64(read_sample(pmu, gt_id, + __I915_SAMPLE_FREQ_REQ), USEC_PER_SEC /* to MHz */); break; case I915_PMU_INTERRUPTS: val = READ_ONCE(pmu->irq_count); break; case I915_PMU_RC6_RESIDENCY: - val = get_rc6(to_gt(i915)); + val = get_rc6(i915->gt[gt_id]); break; case I915_PMU_SOFTWARE_GT_AWAKE_TIME: val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915))); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 3a811266ac6a..33d80fbaab8b 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -38,13 +38,16 @@ enum { __I915_NUM_PMU_SAMPLERS }; +#define I915_PMU_MAX_GTS 2 + /* * How many different events we track in the global PMU mask. * * It is also used to know to needed number of event reference counters. */ #define I915_PMU_MASK_BITS \ - (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT) + (I915_ENGINE_SAMPLE_COUNT + \ + I915_PMU_MAX_GTS * __I915_PMU_TRACKED_EVENT_COUNT) #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) @@ -124,11 +127,11 @@ struct i915_pmu { * Only global counters are held here, while the per-engine ones are in * struct intel_engine_cs. */ - struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; + struct i915_pmu_sample sample[I915_PMU_MAX_GTS * __I915_NUM_PMU_SAMPLERS]; /** * @sleep_last: Last time GT parked for RC6 estimation. */ - ktime_t sleep_last; + ktime_t sleep_last[I915_PMU_MAX_GTS]; /** * @irq_count: Number of interrupts * diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index ba40855dbc93..f31dfacde601 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -280,7 +280,16 @@ enum drm_i915_pmu_engine_sample { #define I915_PMU_ENGINE_SEMA(class, instance) \ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA) -#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) +/* + * Top 4 bits of every non-engine counter are GT id. 
+ */ +#define __I915_PMU_GT_SHIFT (60) + +#define ___I915_PMU_OTHER(gt, x) \ + (((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \ + ((__u64)(gt) << __I915_PMU_GT_SHIFT)) + +#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x) #define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0) #define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1) @@ -290,6 +299,12 @@ enum drm_i915_pmu_engine_sample { #define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY +#define __I915_PMU_ACTUAL_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 0) +#define __I915_PMU_REQUESTED_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 1) +#define __I915_PMU_INTERRUPTS(gt) ___I915_PMU_OTHER(gt, 2) +#define __I915_PMU_RC6_RESIDENCY(gt) ___I915_PMU_OTHER(gt, 3) +#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt) ___I915_PMU_OTHER(gt, 4) + /* Each region is a minimum of 16k, and there are at most 255 of them. */ #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use -- cgit v1.2.3 From 98d2722a85c4ad5f2baf2272cbb0fab67f797b69 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 31 May 2023 16:54:12 -0700 Subject: drm/i915/huc: differentiate the 2 steps of the MTL HuC auth flow Before we add the second step of the MTL HuC auth (via GSC), we need to have the ability to differentiate between them. To do so, the huc authentication check is duplicated for GuC and GSC auth, with GSC-enabled binaries being considered fully authenticated only after the GSC auth step. To report the difference between the 2 auth steps, a new case is added to the HuC getparam. This way, the clear media driver can start submitting before full auth, as partial auth is enough for those workloads. v2: fix authentication status check for DG2 v3: add a better comment at the top of the HuC file to explain the different approaches to load and auth (John) v4: update call to intel_huc_is_authenticated in the pxp code to check for GSC authentication v5: drop references to meu and exclamation mark in huc_auth print (John) Signed-off-by: Daniele Ceraolo Spurio Cc: Alan Previn Cc: John Harrison Reviewed-by: Alan Previn #v2 Reviewed-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20230531235415.1467475-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 111 ++++++++++++++++++++++------- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 16 +++-- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 4 +- drivers/gpu/drm/i915/i915_reg.h | 3 + drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c | 2 +- include/uapi/drm/i915_drm.h | 3 +- 6 files changed, 104 insertions(+), 35 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index b1e7a1e1bfa3..4c8592a281f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -10,6 +10,7 @@ #include "intel_huc.h" #include "intel_huc_print.h" #include "i915_drv.h" +#include "i915_reg.h" #include #include @@ -22,15 +23,23 @@ * capabilities by adding HuC specific commands to batch buffers. * * The kernel driver is only responsible for loading the HuC firmware and - * triggering its security authentication, which is performed by the GuC on - * older platforms and by the GSC on newer ones. For the GuC to correctly - * perform the authentication, the HuC binary must be loaded before the GuC one. + * triggering its security authentication. 
This is done differently depending + * on the platform: + * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA + * and the authentication via GuC + * - DG2: load and authentication are both performed via GSC. + * - MTL and newer platforms: the load is performed via DMA (same as with + * not-DG2 older platforms), while the authentication is done in 2-steps, + * a first auth for clear-media workloads via GuC and a second one for all + * workloads via GSC. + * On platforms where the GuC does the authentication, to correctly do so the + * HuC binary must be loaded before the GuC one. * Loading the HuC is optional; however, not using the HuC might negatively * impact power usage and/or performance of media workloads, depending on the * use-cases. * HuC must be reloaded on events that cause the WOPCM to lose its contents - * (S3/S4, FLR); GuC-authenticated HuC must also be reloaded on GuC/GT reset, - * while GSC-managed HuC will survive that. + * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT + * reset, while on newer ones it will survive that. * * See https://github.com/intel/media-driver for the latest details on HuC * functionality. @@ -106,7 +115,7 @@ static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrti { struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer); - if (!intel_huc_is_authenticated(huc)) { + if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) { if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC) huc_notice(huc, "timed out waiting for MEI GSC\n"); else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP) @@ -124,7 +133,7 @@ static void huc_delayed_load_start(struct intel_huc *huc) { ktime_t delay; - GEM_BUG_ON(intel_huc_is_authenticated(huc)); + GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)); /* * On resume we don't have to wait for MEI-GSC to be re-probed, but we @@ -284,13 +293,23 @@ void intel_huc_init_early(struct intel_huc *huc) } if (GRAPHICS_VER(i915) >= 11) { - huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; - huc->status.mask = HUC_LOAD_SUCCESSFUL; - huc->status.value = HUC_LOAD_SUCCESSFUL; + huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO; + huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL; + huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL; } else { - huc->status.reg = HUC_STATUS2; - huc->status.mask = HUC_FW_VERIFIED; - huc->status.value = HUC_FW_VERIFIED; + huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2; + huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED; + huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED; + } + + if (IS_DG2(i915)) { + huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO; + huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL; + huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL; + } else { + huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS5(MTL_GSC_HECI1_BASE); + huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI_FWSTS5_HUC_AUTH_DONE; + huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI_FWSTS5_HUC_AUTH_DONE; } } @@ -397,28 +416,38 @@ void intel_huc_suspend(struct intel_huc *huc) delayed_huc_load_complete(huc); } -int intel_huc_wait_for_auth_complete(struct intel_huc *huc) +static const char *auth_mode_string(struct intel_huc *huc, + enum intel_huc_authentication_type type) +{ + bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC; + + return partial ? 
"clear media" : "all workloads"; +} + +int intel_huc_wait_for_auth_complete(struct intel_huc *huc, + enum intel_huc_authentication_type type) { struct intel_gt *gt = huc_to_gt(huc); int ret; ret = __intel_wait_for_register(gt->uncore, - huc->status.reg, - huc->status.mask, - huc->status.value, + huc->status[type].reg, + huc->status[type].mask, + huc->status[type].value, 2, 50, NULL); /* mark the load process as complete even if the wait failed */ delayed_huc_load_complete(huc); if (ret) { - huc_err(huc, "firmware not verified %pe\n", ERR_PTR(ret)); + huc_err(huc, "firmware not verified for %s: %pe\n", + auth_mode_string(huc, type), ERR_PTR(ret)); intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL); return ret; } intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); - huc_info(huc, "authenticated!\n"); + huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type)); return 0; } @@ -458,7 +487,7 @@ int intel_huc_auth(struct intel_huc *huc) } /* Check authentication status, it should be done by now */ - ret = intel_huc_wait_for_auth_complete(huc); + ret = intel_huc_wait_for_auth_complete(huc, INTEL_HUC_AUTH_BY_GUC); if (ret) goto fail; @@ -469,16 +498,29 @@ fail: return ret; } -bool intel_huc_is_authenticated(struct intel_huc *huc) +bool intel_huc_is_authenticated(struct intel_huc *huc, + enum intel_huc_authentication_type type) { struct intel_gt *gt = huc_to_gt(huc); intel_wakeref_t wakeref; u32 status = 0; with_intel_runtime_pm(gt->uncore->rpm, wakeref) - status = intel_uncore_read(gt->uncore, huc->status.reg); + status = intel_uncore_read(gt->uncore, huc->status[type].reg); - return (status & huc->status.mask) == huc->status.value; + return (status & huc->status[type].mask) == huc->status[type].value; +} + +static bool huc_is_fully_authenticated(struct intel_huc *huc) +{ + struct intel_uc_fw *huc_fw = &huc->fw; + + if (!huc_fw->has_gsc_headers) + return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC); + else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0)) + return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC); + else + return false; } /** @@ -493,7 +535,9 @@ bool intel_huc_is_authenticated(struct intel_huc *huc) */ int intel_huc_check_status(struct intel_huc *huc) { - switch (__intel_uc_fw_status(&huc->fw)) { + struct intel_uc_fw *huc_fw = &huc->fw; + + switch (__intel_uc_fw_status(huc_fw)) { case INTEL_UC_FIRMWARE_NOT_SUPPORTED: return -ENODEV; case INTEL_UC_FIRMWARE_DISABLED: @@ -510,7 +554,17 @@ int intel_huc_check_status(struct intel_huc *huc) break; } - return intel_huc_is_authenticated(huc); + /* + * GSC-enabled binaries loaded via DMA are first partially + * authenticated by GuC and then fully authenticated by GSC + */ + if (huc_is_fully_authenticated(huc)) + return 1; /* full auth */ + else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) && + intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC)) + return 2; /* clear media only */ + else + return 0; } static bool huc_has_delayed_load(struct intel_huc *huc) @@ -524,7 +578,10 @@ void intel_huc_update_auth_status(struct intel_huc *huc) if (!intel_uc_fw_is_loadable(&huc->fw)) return; - if (intel_huc_is_authenticated(huc)) + if (!huc->fw.has_gsc_headers) + return; + + if (huc_is_fully_authenticated(huc)) intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); else if (huc_has_delayed_load(huc)) @@ -557,5 +614,5 @@ void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p) with_intel_runtime_pm(gt->uncore->rpm, wakeref) 
drm_printf(p, "HuC status: 0x%08x\n", - intel_uncore_read(gt->uncore, huc->status.reg)); + intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg)); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index 112f0dce4702..3f6aa7c37abc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -22,6 +22,12 @@ enum intel_huc_delayed_load_status { INTEL_HUC_DELAYED_LOAD_ERROR, }; +enum intel_huc_authentication_type { + INTEL_HUC_AUTH_BY_GUC = 0, + INTEL_HUC_AUTH_BY_GSC, + INTEL_HUC_AUTH_MAX_MODES +}; + struct intel_huc { /* Generic uC firmware management */ struct intel_uc_fw fw; @@ -31,7 +37,7 @@ struct intel_huc { i915_reg_t reg; u32 mask; u32 value; - } status; + } status[INTEL_HUC_AUTH_MAX_MODES]; struct { struct i915_sw_fence fence; @@ -49,10 +55,12 @@ int intel_huc_init(struct intel_huc *huc); void intel_huc_fini(struct intel_huc *huc); void intel_huc_suspend(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc); -int intel_huc_wait_for_auth_complete(struct intel_huc *huc); +int intel_huc_wait_for_auth_complete(struct intel_huc *huc, + enum intel_huc_authentication_type type); +bool intel_huc_is_authenticated(struct intel_huc *huc, + enum intel_huc_authentication_type type); int intel_huc_check_status(struct intel_huc *huc); void intel_huc_update_auth_status(struct intel_huc *huc); -bool intel_huc_is_authenticated(struct intel_huc *huc); void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus); void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus); @@ -81,7 +89,7 @@ static inline bool intel_huc_is_loaded_by_gsc(const struct intel_huc *huc) static inline bool intel_huc_wait_required(struct intel_huc *huc) { return intel_huc_is_used(huc) && intel_huc_is_loaded_by_gsc(huc) && - !intel_huc_is_authenticated(huc); + !intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC); } void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 89a887d33b77..ac2ae5f5011e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -161,7 +161,7 @@ int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc) * component gets re-bound and this function called again. If so, just * mark the HuC as loaded. 
 */ - if (intel_huc_is_authenticated(huc)) { + if (intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) { intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING); return 0; } @@ -174,7 +174,7 @@ int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc) intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_TRANSFERRED); - return intel_huc_wait_for_auth_complete(huc); + return intel_huc_wait_for_auth_complete(huc, INTEL_HUC_AUTH_BY_GSC); } /** diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c4197e31962e..e0fc0f588c45 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -941,6 +941,9 @@ #define HECI_H_GS1(base) _MMIO((base) + 0xc4c) #define HECI_H_GS1_ER_PREP REG_BIT(0) +#define HECI_FWSTS5(base) _MMIO((base) + 0xc68) +#define HECI_FWSTS5_HUC_AUTH_DONE (1 << 19) + #define HSW_GTT_CACHE_EN _MMIO(0x4024) #define GTT_CACHE_EN_ALL 0xF0007FFF #define GEN7_WR_WATERMARK _MMIO(0x4028) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c index a217821eb0fb..f13890ec7db1 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c @@ -196,7 +196,7 @@ bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) * gsc-proxy init flow (the last set of dependencies that * are out of order) will suffice. */ - if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc) && + if (intel_huc_is_authenticated(&pxp->ctrl_gt->uc.huc, INTEL_HUC_AUTH_BY_GSC) && intel_gsc_uc_fw_proxy_init_done(&pxp->ctrl_gt->uc.gsc)) return true; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index f31dfacde601..a1848e806059 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -674,7 +674,8 @@ typedef struct drm_i915_irq_wait { * If the IOCTL is successful, the returned parameter will be set to one of the * following values: * * 0 if HuC firmware load is not complete, - * * 1 if HuC firmware is authenticated and running. + * * 1 if HuC firmware is loaded and fully authenticated, + * * 2 if HuC firmware is loaded and authenticated for clear media only */ #define I915_PARAM_HUC_STATUS 42 -- cgit v1.2.3 From 81b1b599dfd71c958418dad586fa72c8d30d1065 Mon Sep 17 00:00:00 2001 From: Fei Yang Date: Tue, 6 Jun 2023 12:00:42 +0200 Subject: drm/i915: Allow user to set cache at BO creation To comply with the design that buffer objects shall have immutable cache setting throughout their life cycle, {set, get}_caching ioctls are no longer supported from MTL onward. With that change, caching policy can only be set at object creation time. The current code applies a default (platform dependent) cache setting for all objects. However, this is not optimal for performance tuning. The patch extends the existing gem_create uAPI to let the user set the PAT index for the object at creation time. The new extension is platform independent, so UMDs can switch to using this extension for older platforms as well, while {set, get}_caching are still supported on these legacy platforms for compatibility reasons. However, since the PAT index was not clearly defined for platforms prior to GEN12 (TGL), we are limiting this extension to GEN12+ platforms only. See ext_set_pat() for the implementation details. 
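Editor's note: the UAPI kerneldoc added below includes a single-extension usage example; for completeness, here is a hedged sketch of chaining SET_PAT with another create extension through i915_user_extension.next_extension (the pat_index value and fd are placeholders, and the memory-regions setup is elided):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    struct drm_i915_gem_create_ext_set_pat set_pat = {
        .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
        .pat_index = 3, /* hypothetical index; valid range is per platform */
    };
    struct drm_i915_gem_create_ext_memory_regions regions = {
        .base = {
            .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
            .next_extension = (uintptr_t)&set_pat,
        },
        /* .regions/.num_regions would point at a probed region list */
    };
    struct drm_i915_gem_create_ext create = {
        .size = 4096,
        .extensions = (uintptr_t)&regions,
    };
    /* int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create); */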
The documentation related to the PAT/MOCS tables is currently available for Tiger Lake here: https://www.intel.com/content/www/us/en/docs/graphics-for-linux/developer-reference/1-0/tiger-lake.html The documentation for other platforms is currently being updated. BSpec: 45101 Mesa support has been submitted in this merge request: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22878 The media driver support has been submitted in this merge request: https://github.com/intel/media-driver/pull/1680 The IGT test related to this change is igt@gem_create@create-ext-set-pat. Signed-off-by: Fei Yang Cc: Chris Wilson Cc: Matt Roper Cc: Andi Shyti Reviewed-by: Andi Shyti Acked-by: Jordan Justen Tested-by: Jordan Justen Acked-by: Carl Zhang Tested-by: Lihao Gu Signed-off-by: Andi Shyti Acked-by: Tvrtko Ursulin Acked-by: Slawomir Milczarek Link: https://patchwork.freedesktop.org/patch/msgid/20230606100042.482345-2-andi.shyti@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_create.c | 40 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.c | 6 +++++ include/uapi/drm/i915_drm.h | 41 ++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index bfe1dbda4cb7..d24c0ce8805c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -245,6 +245,7 @@ struct create_ext { unsigned int n_placements; unsigned int placement_mask; unsigned long flags; + unsigned int pat_index; }; static void repr_placements(char *buf, size_t size, @@ -394,11 +395,43 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data return 0; } +static int ext_set_pat(struct i915_user_extension __user *base, void *data) +{ + struct create_ext *ext_data = data; + struct drm_i915_private *i915 = ext_data->i915; + struct drm_i915_gem_create_ext_set_pat ext; + unsigned int max_pat_index; + + BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) != + offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd)); + + /* Limiting the extension only to Meteor Lake */ + if (!IS_METEORLAKE(i915)) + return -ENODEV; + + if (copy_from_user(&ext, base, sizeof(ext))) + return -EFAULT; + + max_pat_index = INTEL_INFO(i915)->max_pat_index; + + if (ext.pat_index > max_pat_index) { + drm_dbg(&i915->drm, "PAT index is invalid: %u\n", + ext.pat_index); + return -EINVAL; + } + + ext_data->pat_index = ext.pat_index; + + return 0; +} + static const i915_user_extension_fn create_extensions[] = { [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements, [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected, + [I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat, }; +#define PAT_INDEX_NOT_SET 0xffff /** * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it. 
 * @dev: drm device pointer
@@ -418,6 +451,7 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
 	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
 		return -EINVAL;
 
+	ext_data.pat_index = PAT_INDEX_NOT_SET;
 	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
 				   create_extensions,
 				   ARRAY_SIZE(create_extensions),
@@ -454,5 +488,11 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
+		i915_gem_object_set_pat_index(obj, ext_data.pat_index);
+		/* Mark that the pat_index was set by the UMD */
+		obj->pat_set_by_user = true;
+	}
+
 	return i915_gem_publish(obj, file, &args->size, &args->handle);
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46a19b099ec8..97ac6fb37958 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -208,6 +208,12 @@ bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
 	if (!(obj->flags & I915_BO_ALLOC_USER))
 		return false;
 
+	/*
+	 * Always flush the cache for UMD objects at creation time.
+	 */
+	if (obj->pat_set_by_user)
+		return true;
+
 	/*
 	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
 	 * possible for userspace to bypass the GTT caching bits set by the
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a1848e806059..7000e5910a1d 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -3680,9 +3680,13 @@ struct drm_i915_gem_create_ext {
 	 *
 	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
	 * struct drm_i915_gem_create_ext_protected_content.
+	 *
+	 * For I915_GEM_CREATE_EXT_SET_PAT usage see
+	 * struct drm_i915_gem_create_ext_set_pat.
 	 */
 #define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
 #define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+#define I915_GEM_CREATE_EXT_SET_PAT 2
 	__u64 extensions;
 };
 
@@ -3797,6 +3801,43 @@ struct drm_i915_gem_create_ext_protected_content {
 	__u32 flags;
 };
 
+/**
+ * struct drm_i915_gem_create_ext_set_pat - The
+ * I915_GEM_CREATE_EXT_SET_PAT extension.
+ *
+ * If this extension is provided, the specified caching policy (PAT index) is
+ * applied to the buffer object.
+ *
+ * Below is an example of how to create an object with a specific caching
+ * policy:
+ *
+ * .. code-block:: C
+ *
+ *      struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
+ *              .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
+ *              .pat_index = 0,
+ *      };
+ *      struct drm_i915_gem_create_ext create_ext = {
+ *              .size = PAGE_SIZE,
+ *              .extensions = (uintptr_t)&set_pat_ext,
+ *      };
+ *
+ *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ *      if (err) ...
+ */
+struct drm_i915_gem_create_ext_set_pat {
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+	/**
+	 * @pat_index: PAT index to be set.
+	 * The PAT index is a bit field in the Page Table Entry that controls
+	 * caching behaviors for GPU accesses.
The definition of the PAT index is
+	 * platform dependent and can be found in the hardware specifications.
+	 */
+	__u32 pat_index;
+	/** @rsvd: reserved for future use */
+	__u32 rsvd;
+};
+
 /* ID of the protected content session managed by i915 when PXP is active */
 #define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
-- 
cgit v1.2.3

From 92a3a9b1a3997cf67aca7874e7f61b102ea2f27c Mon Sep 17 00:00:00 2001
From: Dani Liberman
Date: Thu, 20 Apr 2023 19:33:34 +0300
Subject: accel/habanalabs: add description to several info ioctls

Several info ioctls may return success even though no data was
retrieved.

Signed-off-by: Dani Liberman
Reviewed-by: Oded Gabbay
Signed-off-by: Oded Gabbay
---
 include/uapi/drm/habanalabs_accel.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'include/uapi')

diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h
index d9ef1b151d04..e6436f3e8ea6 100644
--- a/include/uapi/drm/habanalabs_accel.h
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -787,18 +787,28 @@ enum hl_server_type {
 *                         The address which accessing it caused the razwi.
 *                         Razwi initiator.
 *                         Razwi cause, was it a page fault or MMU access error.
+ *                         May return 0 even though no new data is available;
+ *                         in that case, timestamp will be 0.
 * HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
 * HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
 * HL_INFO_REGISTER_EVENTFD   - Register eventfd for event notifications.
 * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
 * HL_INFO_GET_EVENTS         - Retrieve the last occurred events
 * HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
+ *                                  May return 0 even though no new data is available;
+ *                                  in that case, timestamp will be 0.
 * HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
 * HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
+ *                            May return 0 even though no new data is available;
+ *                            in that case, timestamp will be 0.
 * HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
 * HL_INFO_FW_GENERIC_REQ - Send generic request to FW.
 * HL_INFO_HW_ERR_EVENT   - Retrieve information on the reported HW error.
+ *                          May return 0 even though no new data is available;
+ *                          in that case, timestamp will be 0.
 * HL_INFO_FW_ERR_EVENT   - Retrieve information on the reported FW error.
+ *                          May return 0 even though no new data is available;
+ *                          in that case, timestamp will be 0.
 */
 #define HL_INFO_HW_IP_INFO		0
 #define HL_INFO_HW_EVENTS		1
-- 
cgit v1.2.3

From 489763af891d5dc35c0b64e18af284d6591286cf Mon Sep 17 00:00:00 2001
From: Pierre-Eric Pelloux-Prayer
Date: Mon, 24 Apr 2023 19:25:32 +0200
Subject: drm/amdgpu: add new flag to AMDGPU_CTX_QUERY2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The OpenGL EXT_robustness extension expects the driver to stop reporting
GUILTY_CONTEXT_RESET when the reset has completed and the GPU is ready
to accept submissions again.

This commit adds an AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS flag that
lets the UMD know that the reset has not finished yet.
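As an illustrative sketch (not part of this patch; the libdrm wiring and
the report_guilty_reset policy are assumptions), a UMD could combine the
two flags like this:

    union drm_amdgpu_ctx args = {0};

    args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
    args.in.ctx_id = ctx_id;

    if (drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args)) == 0) {
            bool guilty = args.out.state.flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
            bool in_progress = args.out.state.flags &
                               AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;

            /* Keep reporting GUILTY_CONTEXT_RESET until the reset settles */
            report_guilty_reset = guilty || in_progress;
    }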
Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22290 Reviewed-by: Christian König Reviewed-by: André Almeida Signed-off-by: Pierre-Eric Pelloux-Prayer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- include/uapi/drm/amdgpu_drm.h | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index d2139ac12159..e1f642a3dc2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -576,6 +576,9 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, if (atomic_read(&ctx->guilty)) out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; + if (amdgpu_in_reset(adev)) + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS; + if (adev->ras_enabled && con) { /* Return the cached values in O(1), * and schedule delayed work to cache diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b987022e11b4..45544ebe576e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -111,9 +111,10 @@ * tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size, * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi * 3.53.0 - Support for GFX11 CP GFX shadowing + * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 53 +#define KMS_DRIVER_MINOR 54 #define KMS_DRIVER_PATCHLEVEL 0 unsigned int amdgpu_vram_limit = UINT_MAX; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index cc78528c3b4b..79b14828d542 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -245,6 +245,8 @@ union drm_amdgpu_bo_list { /* indicate some errors are detected by RAS */ #define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3) #define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4) +/* indicate that the reset hasn't completed yet */ +#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5) /* Context priority level */ #define AMDGPU_CTX_PRIORITY_UNSET -2048 -- cgit v1.2.3 From ef75a6ef37235e211bbdb17c25e5f79c55df1750 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Thu, 3 Mar 2022 10:56:05 -0500 Subject: drm/amdkfd: Update coherence settings for svm ranges Recently introduced commit "drm/amdgpu: Set cache coherency for GC 9.4.3" did not update the settings applicable for svm ranges. Add the coherence settings for svm ranges for GFX IP 9.4.3. 
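For context, a hedged sketch of how user space typically requests
coherent access for an SVM range (field names follow the uAPI in
kfd_ioctl.h; kfd_fd, buf, and buf_size are assumed to be set up
elsewhere). The new GC 9.4.3 case below then maps such flags onto MTYPE
settings:

    /* kfd_ioctl_svm_args ends in a flexible attrs[] array, so allocate
     * it with room for one attribute. */
    size_t sz = sizeof(struct kfd_ioctl_svm_args) +
                sizeof(struct kfd_ioctl_svm_attribute);
    struct kfd_ioctl_svm_args *args = calloc(1, sz);

    args->start_addr = (uint64_t)buf;   /* page-aligned range */
    args->size = buf_size;
    args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
    args->nattr = 1;
    args->attrs[0].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
    args->attrs[0].value = KFD_IOCTL_SVM_FLAG_COHERENT;

    ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
    free(args);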
Reviewed-by: Amber Lin
Signed-off-by: Rajneesh Bhardwaj
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 17 +++++++++++++++++
 include/uapi/linux/kfd_ioctl.h       |  2 ++
 2 files changed, 19 insertions(+)

(limited to 'include/uapi')

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 96ccff79902c..4b4f3bf8b823 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1159,6 +1159,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
 	uint64_t pte_flags;
 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
 	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
+	bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED;
 
 	if (domain == SVM_RANGE_VRAM_DOMAIN)
 		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
@@ -1198,6 +1199,22 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
 				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
 		}
 		break;
+	case IP_VERSION(9, 4, 3):
+		/* TODO: Needs more work for handling multiple memory
+		 * partitions (e.g. NPS4). The current approach is only
+		 * applicable without memory partitions.
+		 */
+		snoop = true;
+		if (uncached)
+			mapping_flags |= AMDGPU_VM_MTYPE_UC;
+		/* local HBM region close to partition */
+		else if (bo_adev == adev)
+			mapping_flags |= AMDGPU_VM_MTYPE_RW;
+		/* local HBM region far from partition or remote XGMI GPU or
+		 * system memory
+		 */
+		else
+			mapping_flags |= AMDGPU_VM_MTYPE_NC;
+		break;
 	default:
 		mapping_flags |= coherent ?
 			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 2da5c3ad71bd..2a9671e1ddb5 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -623,6 +623,8 @@ enum kfd_mmio_remap {
 #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
 /* Keep GPU memory mapping always valid as if XNACK is disable */
 #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
+/* Uncached access to memory */
+#define KFD_IOCTL_SVM_FLAG_UNCACHED 0x00000080
 
 /**
  * kfd_ioctl_svm_op - SVM ioctl operations
-- 
cgit v1.2.3

From ba3c87fffb79311f54464288c66421d19c2c1234 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 2 Jun 2023 12:58:05 -0400
Subject: amd/amdkfd: drop unused KFD_IOCTL_SVM_FLAG_UNCACHED flag

It was left over from GC 9.4.3 bring-up and is currently unused. Drop
it for now.
Cc: Philip.Yang@amd.com
Cc: rajneesh.bhardwaj@amd.com
Cc: Felix.Kuehling@amd.com
Reviewed-by: Rajneesh Bhardwaj
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +-
 include/uapi/linux/kfd_ioctl.h       | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

(limited to 'include/uapi')

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 615eab3f78c9..5ff1a5a89d96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1154,7 +1154,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
 	uint64_t pte_flags;
 	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
 	bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
-	bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED;
+	bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
 	unsigned int mtype_local;
 
 	if (domain == SVM_RANGE_VRAM_DOMAIN)
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 2a9671e1ddb5..2da5c3ad71bd 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -623,8 +623,6 @@ enum kfd_mmio_remap {
 #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
 /* Keep GPU memory mapping always valid as if XNACK is disable */
 #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
-/* Uncached access to memory */
-#define KFD_IOCTL_SVM_FLAG_UNCACHED 0x00000080
 
 /**
  * kfd_ioctl_svm_op - SVM ioctl operations
-- 
cgit v1.2.3

From 4f98cf2baf9faee5b6f2f7889dad7c0f7686a787 Mon Sep 17 00:00:00 2001
From: Jonathan Kim
Date: Wed, 2 Mar 2022 14:30:12 -0500
Subject: drm/amdkfd: add debug and runtime enable interface

Introduce the GPU debug operations interface.

For ROCm-GDB to extend the GNU Debugger's ability to inspect the AMD GPU
instruction set, provide the necessary interface to allow the debugger
to set HW debug mode and to query exceptions per HSA queue, process, or
device.

The runtime_enable interface coordinates exception handling with the
HSA runtime.

Usage is available in the kern docs at uapi/linux/kfd_ioctl.h.
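As a minimal debugger-side sketch of the intended flow (assuming the
target is already ptrace-attached and has called
AMDKFD_IOC_RUNTIME_ENABLE; kfd_fd, target_pid, and dbg_fd are set up by
the debugger):

    struct kfd_runtime_info rinfo = {0};
    struct kfd_ioctl_dbg_trap_args args = {0};

    args.pid = target_pid;
    args.op = KFD_IOC_DBG_TRAP_ENABLE;
    args.enable.dbg_fd = dbg_fd;               /* fd KFD signals exceptions on */
    args.enable.rinfo_ptr = (uint64_t)&rinfo;
    args.enable.rinfo_size = sizeof(rinfo);
    args.enable.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP);

    ret = ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);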
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 48 +++ include/uapi/linux/kfd_ioctl.h | 668 ++++++++++++++++++++++++++++++- 2 files changed, 715 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a9efff94390b..00e34125987c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -2729,6 +2729,48 @@ static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data) return ret; } +static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) +{ + return 0; +} + +static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data) +{ + struct kfd_ioctl_dbg_trap_args *args = data; + int r = 0; + + if (sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Debugging does not support sched_policy %i", sched_policy); + return -EINVAL; + } + + switch (args->op) { + case KFD_IOC_DBG_TRAP_ENABLE: + case KFD_IOC_DBG_TRAP_DISABLE: + case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: + case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: + case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: + case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: + case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: + case KFD_IOC_DBG_TRAP_RESUME_QUEUES: + case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: + case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: + case KFD_IOC_DBG_TRAP_SET_FLAGS: + case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: + case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: + case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: + case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: + pr_warn("Debugging not supported yet\n"); + r = -EACCES; + break; + default: + pr_err("Invalid option: %i\n", args->op); + r = -EINVAL; + } + + return r; +} + #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ .cmd_drv = 0, .name = #ioctl} @@ -2841,6 +2883,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF, kfd_ioctl_export_dmabuf, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE, + kfd_ioctl_runtime_enable, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP, + kfd_ioctl_set_debug_trap, 0), }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 2da5c3ad71bd..32913d674d38 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -110,6 +110,32 @@ struct kfd_ioctl_get_available_memory_args { __u32 pad; }; +struct kfd_dbg_device_info_entry { + __u64 exception_status; + __u64 lds_base; + __u64 lds_limit; + __u64 scratch_base; + __u64 scratch_limit; + __u64 gpuvm_base; + __u64 gpuvm_limit; + __u32 gpu_id; + __u32 location_id; + __u32 vendor_id; + __u32 device_id; + __u32 revision_id; + __u32 subsystem_vendor_id; + __u32 subsystem_device_id; + __u32 fw_version; + __u32 gfx_target_version; + __u32 simd_count; + __u32 max_waves_per_simd; + __u32 array_count; + __u32 simd_arrays_per_engine; + __u32 num_xcc; + __u32 capability; + __u32 debug_prop; +}; + /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ #define KFD_IOC_CACHE_POLICY_COHERENT 0 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 @@ -773,6 +799,640 @@ struct kfd_ioctl_set_xnack_mode_args { __s32 xnack_enabled; }; +/* Wave launch override modes */ +enum kfd_dbg_trap_override_mode { + KFD_DBG_TRAP_OVERRIDE_OR = 
0, + KFD_DBG_TRAP_OVERRIDE_REPLACE = 1 +}; + +/* Wave launch overrides */ +enum kfd_dbg_trap_mask { + KFD_DBG_TRAP_MASK_FP_INVALID = 1, + KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2, + KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4, + KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8, + KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16, + KFD_DBG_TRAP_MASK_FP_INEXACT = 32, + KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64, + KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128, + KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256, + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30), + KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31) +}; + +/* Wave launch modes */ +enum kfd_dbg_trap_wave_launch_mode { + KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0, + KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1, + KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3 +}; + +/* Address watch modes */ +enum kfd_dbg_trap_address_watch_mode { + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0, + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1, + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2, + KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3 +}; + +/* Additional wave settings */ +enum kfd_dbg_trap_flags { + KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1, +}; + +/* Trap exceptions */ +enum kfd_dbg_trap_exception_code { + EC_NONE = 0, + /* per queue */ + EC_QUEUE_WAVE_ABORT = 1, + EC_QUEUE_WAVE_TRAP = 2, + EC_QUEUE_WAVE_MATH_ERROR = 3, + EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4, + EC_QUEUE_WAVE_MEMORY_VIOLATION = 5, + EC_QUEUE_WAVE_APERTURE_VIOLATION = 6, + EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16, + EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17, + EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18, + EC_QUEUE_PACKET_RESERVED = 19, + EC_QUEUE_PACKET_UNSUPPORTED = 20, + EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21, + EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22, + EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23, + EC_QUEUE_PREEMPTION_ERROR = 30, + EC_QUEUE_NEW = 31, + /* per device */ + EC_DEVICE_QUEUE_DELETE = 32, + EC_DEVICE_MEMORY_VIOLATION = 33, + EC_DEVICE_RAS_ERROR = 34, + EC_DEVICE_FATAL_HALT = 35, + EC_DEVICE_NEW = 36, + /* per process */ + EC_PROCESS_RUNTIME = 48, + EC_PROCESS_DEVICE_REMOVE = 49, + EC_MAX +}; + +/* Mask generated by ecode in kfd_dbg_trap_exception_code */ +#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1)) + +/* Masks for exception code type checks below */ +#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \ + KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \ + KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \ + KFD_EC_MASK(EC_QUEUE_NEW)) +#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \ + KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \ + KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \ + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \ + KFD_EC_MASK(EC_DEVICE_NEW)) +#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \ + KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE)) + +/* Checks for exception code types for KFD search */ +#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \ + 
(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)	\
+	(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)	\
+	(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+
+
+/* Runtime enable states */
+enum kfd_dbg_runtime_state {
+	DEBUG_RUNTIME_STATE_DISABLED = 0,
+	DEBUG_RUNTIME_STATE_ENABLED = 1,
+	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
+	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
+};
+
+/* Runtime enable status */
+struct kfd_runtime_info {
+	__u64 r_debug;
+	__u32 runtime_state;
+	__u32 ttmp_setup;
+};
+
+/* Enable modes for runtime enable */
+#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1
+#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2
+
+/**
+ * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
+ *
+ * Coordinates debug exception signalling and debug device enablement with runtime.
+ *
+ * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
+ * @mode_mask - mask to set mode
+ *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
+ *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
+ * @capabilities_mask - mask to notify runtime on what KFD supports
+ *
+ * Return - 0 on SUCCESS.
+ *	  - EBUSY if a runtime enable call is already pending.
+ *	  - EEXIST if user queues were already active prior to the call.
+ *	    If the process is debug enabled, runtime enable will enable debug devices and
+ *	    wait for the debugger process to send runtime exception EC_PROCESS_RUNTIME
+ *	    to unblock - see kfd_ioctl_dbg_trap_args.
+ *
+ */
+struct kfd_ioctl_runtime_enable_args {
+	__u64 r_debug;
+	__u32 mode_mask;
+	__u32 capabilities_mask;
+};
+
+/* Queue information */
+struct kfd_queue_snapshot_entry {
+	__u64 exception_status;
+	__u64 ring_base_address;
+	__u64 write_pointer_address;
+	__u64 read_pointer_address;
+	__u64 ctx_save_restore_address;
+	__u32 queue_id;
+	__u32 gpu_id;
+	__u32 ring_size;
+	__u32 queue_type;
+	__u32 ctx_save_restore_area_size;
+	__u32 reserved;
+};
+
+/* Queue status return for suspend/resume */
+#define KFD_DBG_QUEUE_ERROR_BIT		30
+#define KFD_DBG_QUEUE_INVALID_BIT	31
+#define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT)
+#define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT)
+
+/* Context save area header information */
+struct kfd_context_save_area_header {
+	struct {
+		__u32 control_stack_offset;
+		__u32 control_stack_size;
+		__u32 wave_state_offset;
+		__u32 wave_state_size;
+	} wave_state;
+	__u32 debug_offset;
+	__u32 debug_size;
+	__u64 err_payload_addr;
+	__u32 err_event_id;
+	__u32 reserved1;
+};
+
+/*
+ * Debug operations
+ *
+ * For specifics on usage and return values, see documentation per operation
+ * below. Otherwise, generic error returns apply:
+ *	- ESRCH if the process to debug does not exist.
+ *
+ *	- EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
+ *		 KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
+ *		 Also returns this error if GPU hardware scheduling is not supported.
+ *
+ *	- EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if the target process is not
+ *		PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
+ *		cleanup of debug mode as long as the process is debug enabled.
+ *
+ *	- EACCES if any DBG_HW_OP (debug hardware operation) is requested when
+ *		 AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
+ *
+ *	- ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
+ *
+ *	- Other errors may be returned when a DBG_HW_OP occurs while the GPU
+ *	  is in a fatal state.
+ *
+ */
+enum kfd_dbg_trap_operations {
+	KFD_IOC_DBG_TRAP_ENABLE = 0,
+	KFD_IOC_DBG_TRAP_DISABLE = 1,
+	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
+	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
+	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
+	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
+	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,            /* DBG_HW_OP */
+	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,             /* DBG_HW_OP */
+	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,    /* DBG_HW_OP */
+	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,  /* DBG_HW_OP */
+	KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
+	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
+	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
+	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
+	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
+};
+
+/**
+ * kfd_ioctl_dbg_trap_enable_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
+ *
+ * Enables a debug session for the target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
+ * kfd_ioctl_dbg_trap_args to disable the debug session.
+ *
+ * @exception_mask (IN)	- exceptions to raise to the debugger
+ * @rinfo_ptr      (IN)	- pointer to runtime info buffer (see kfd_runtime_info)
+ * @rinfo_size     (IN/OUT)	- size of runtime info buffer in bytes
+ * @dbg_fd	   (IN)	- fd KFD will use to notify the debugger of raised
+ *			  exceptions set in exception_mask.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	    Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
+ *	    Size of kfd_runtime_info saved by KFD is returned in @rinfo_size.
+ *        - EBADF if KFD cannot get a reference to dbg_fd.
+ *        - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
+ *        - EINVAL if target process is already debug enabled.
+ *
+ */
+struct kfd_ioctl_dbg_trap_enable_args {
+	__u64 exception_mask;
+	__u64 rinfo_ptr;
+	__u32 rinfo_size;
+	__u32 dbg_fd;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_send_runtime_event_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
+ * Raises exceptions to runtime.
+ *
+ * @exception_mask (IN) - exceptions to raise to runtime
+ * @gpu_id	   (IN) - target device id
+ * @queue_id	   (IN) - target queue id
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	  - ENODEV if gpu_id not found.
+ *	    If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
+ *	    AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
+ *	    All other exceptions are raised to runtime through err_payload_addr.
+ *	    See kfd_context_save_area_header.
+ */
+struct kfd_ioctl_dbg_trap_send_runtime_event_args {
+	__u64 exception_mask;
+	__u32 gpu_id;
+	__u32 queue_id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
+ * Set new exceptions to be raised to the debugger.
+ *
+ * @exception_mask (IN) - new exceptions to raise to the debugger
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ */
+struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
+	__u64 exception_mask;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_wave_launch_override_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
+ * Enable HW exceptions to raise trap.
+ *
+ * @override_mode	(IN)     - see kfd_dbg_trap_override_mode
+ * @enable_mask		(IN/OUT) - reference kfd_dbg_trap_mask.
+ *				   IN is the override modes requested to be enabled.
+ *				   OUT is referenced in Return below.
+ * @support_request_mask	(IN/OUT) - reference kfd_dbg_trap_mask.
+ *					   IN is the override modes requested for support check.
+ *					   OUT is referenced in Return below.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	    Previous enablement is returned in @enable_mask.
+ *	    Actual override support is returned in @support_request_mask.
+ *	  - EINVAL if override mode is not supported.
+ *	  - EACCES if trap support requested is not actually supported.
+ *	    i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
+ *	    Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
+ */
+struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
+	__u32 override_mode;
+	__u32 enable_mask;
+	__u32 support_request_mask;
+	__u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
+ * Set wave launch mode.
+ *
+ * @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ */
+struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
+	__u32 launch_mode;
+	__u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_suspend_queues_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
+ * Suspend queues.
+ *
+ * @exception_mask	(IN) - raised exceptions to clear
+ * @queue_array_ptr	(IN) - pointer to array of queue ids (u32 per queue id)
+ *			       to suspend
+ * @num_queues		(IN) - number of queues to suspend in @queue_array_ptr
+ * @grace_period	(IN) - wave time allowance before preemption
+ *			       per 1K GPU clock cycle unit
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Destruction of a suspended queue is blocked until the queue is
+ * resumed. This allows the debugger to access queue information and
+ * its context save area without running into a race condition on
+ * queue destruction.
+ * Automatically copies per queue context save area header information
+ * into the save area base
+ * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
+ *
+ * Return - Number of queues suspended on SUCCESS.
+ *	    KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK are set
+ *	    for each queue id in the @queue_array_ptr array to report the
+ *	    reason for an unsuccessful suspend.
+ *	    KFD_DBG_QUEUE_ERROR_MASK = HW failure.
+ *	    KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
+ *	    is being destroyed.
+ */
+struct kfd_ioctl_dbg_trap_suspend_queues_args {
+	__u64 exception_mask;
+	__u64 queue_array_ptr;
+	__u32 num_queues;
+	__u32 grace_period;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_resume_queues_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
+ * Resume queues.
+ *
+ * @queue_array_ptr	(IN) - pointer to array of queue ids (u32 per queue id)
+ *			       to resume
+ * @num_queues		(IN) - number of queues to resume in @queue_array_ptr
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - Number of queues resumed on SUCCESS.
+ *	    KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK are set
+ *	    for each queue id in the @queue_array_ptr array to report the
+ *	    reason for an unsuccessful resume.
+ *	    KFD_DBG_QUEUE_ERROR_MASK = HW failure.
+ *	    KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
+ */
+struct kfd_ioctl_dbg_trap_resume_queues_args {
+	__u64 queue_array_ptr;
+	__u32 num_queues;
+	__u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_node_address_watch_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
+ * Sets address watch for device.
+ *
+ * @address	(IN)  - watch address to set
+ * @mode	(IN)  - see kfd_dbg_trap_address_watch_mode
+ * @mask	(IN)  - watch address mask
+ * @gpu_id	(IN)  - target gpu to set watch point
+ * @id		(OUT) - watch id allocated
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	    Allocated watch ID returned to @id.
+ *	  - ENODEV if gpu_id not found.
+ *	  - ENOMEM if no watch IDs can be allocated.
+ */
+struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
+	__u64 address;
+	__u32 mode;
+	__u32 mask;
+	__u32 gpu_id;
+	__u32 id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_clear_node_address_watch_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
+ * Clear address watch for device.
+ *
+ * @gpu_id	(IN) - target device to clear watch point
+ * @id		(IN) - allocated watch id to clear
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	  - ENODEV if gpu_id not found.
+ *	  - EINVAL if watch ID has not been allocated.
+ */
+struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
+	__u32 gpu_id;
+	__u32 id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_flags_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
+ * Sets flags for wave behaviour.
+ *
+ * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	  - EACCES if any debug device does not allow flag options.
+ */
+struct kfd_ioctl_dbg_trap_set_flags_args {
+	__u32 flags;
+	__u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_query_debug_event_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
+ *
+ * Find one or more raised exceptions. This function can return multiple
+ * exceptions from a single queue or a single device with one call. To find
+ * all raised exceptions, this function must be called repeatedly until it
+ * returns -EAGAIN. Returned exceptions can optionally be cleared by
+ * setting the corresponding bit in the @exception_mask input parameter.
+ * However, clearing an exception prevents retrieving further information
+ * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
+ *
+ * @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
+ * @gpu_id	   (OUT)    - gpu id of exceptions raised
+ * @queue_id	   (OUT)    - queue id of exceptions raised
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on raised exception found
+ *	    Raised exceptions found are returned in @exception_mask
+ *	    with reported source id returned in @gpu_id or @queue_id.
+ *	  - EAGAIN if no raised exception has been found
+ */
+struct kfd_ioctl_dbg_trap_query_debug_event_args {
+	__u64 exception_mask;
+	__u32 gpu_id;
+	__u32 queue_id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_query_exception_info_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
+ * Get additional info on raised exception.
+ *
+ * @info_ptr	(IN)	 - pointer to exception info buffer to copy to
+ * @info_size	(IN/OUT) - exception info buffer size (bytes)
+ * @source_id	(IN)	 - target gpu or queue id
+ * @exception_code (IN)	 - target exception
+ * @clear_exception (IN) - clear raised @exception_code exception
+ *			   (0 = false, 1 = true)
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	    If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
+ *	    bytes of memory exception data to @info_ptr.
+ *	    If @exception_code is EC_PROCESS_RUNTIME, copy saved
+ *	    kfd_runtime_info to @info_ptr.
+ *	    Actual required @info_ptr size (bytes) is returned in @info_size.
+ */
+struct kfd_ioctl_dbg_trap_query_exception_info_args {
+	__u64 info_ptr;
+	__u32 info_size;
+	__u32 source_id;
+	__u32 exception_code;
+	__u32 clear_exception;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_get_queue_snapshot_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
+ * Get queue information.
+ *
+ * @exception_mask	 (IN)	  - exceptions raised to clear
+ * @snapshot_buf_ptr	 (IN)	  - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
+ * @num_queues		 (IN/OUT) - number of queue snapshot entries
+ *         The debugger specifies the size of the array allocated in @num_queues.
+ *         KFD returns the number of queues that actually existed. If this is
+ *         larger than the size specified by the debugger, KFD will not overflow
+ *         the array allocated by the debugger.
+ *
+ * @entry_size	(IN/OUT) - size per entry in bytes
+ *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
+ *         @entry_size. KFD returns the number of bytes actually populated per
+ *         entry. The debugger should use KFD_IOCTL_MINOR_VERSION to determine
+ *         which fields in struct kfd_queue_snapshot_entry are valid. This allows
+ *         growing the ABI in a backwards compatible manner.
+ *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the
+ *         event that it's larger than the actual kfd_queue_snapshot_entry.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	    Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
+ *	    into @snapshot_buf_ptr if @num_queues(IN) > 0.
+ *	    Otherwise return @num_queues(OUT) queue snapshot entries that exist.
+ */
+struct kfd_ioctl_dbg_trap_queue_snapshot_args {
+	__u64 exception_mask;
+	__u64 snapshot_buf_ptr;
+	__u32 num_queues;
+	__u32 entry_size;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_get_device_snapshot_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
+ * Get device information.
+ *
+ * @exception_mask	 (IN)	  - exceptions raised to clear
+ * @snapshot_buf_ptr	 (IN)	  - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
+ * @num_devices		 (IN/OUT) - number of debug devices to snapshot
+ *         The debugger specifies the size of the array allocated in @num_devices.
+ *         KFD returns the number of devices that actually existed. If this is
+ *         larger than the size specified by the debugger, KFD will not overflow
+ *         the array allocated by the debugger.
+ *
+ * @entry_size	(IN/OUT) - size per entry in bytes
+ *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
+ *         @entry_size. KFD returns the number of bytes actually populated. The
+ *         debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
+ *         in struct kfd_dbg_device_info_entry are valid. This allows growing the
+ *         ABI in a backwards compatible manner.
+ *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the
+ *         event that it's larger than the actual kfd_dbg_device_info_entry.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ *	    Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
+ *	    into @snapshot_buf_ptr if @num_devices(IN) > 0.
+ *	    Otherwise return @num_devices(OUT) device snapshot entries that exist.
+ */
+struct kfd_ioctl_dbg_trap_device_snapshot_args {
+	__u64 exception_mask;
+	__u64 snapshot_buf_ptr;
+	__u32 num_devices;
+	__u32 entry_size;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_args
+ *
+ * Arguments to debug target process.
+ * + * @pid - target process to debug + * @op - debug operation (see kfd_dbg_trap_operations) + * + * @op determines which union struct args to use. + * Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct. + */ +struct kfd_ioctl_dbg_trap_args { + __u32 pid; + __u32 op; + + union { + struct kfd_ioctl_dbg_trap_enable_args enable; + struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event; + struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled; + struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override; + struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode; + struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues; + struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues; + struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch; + struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch; + struct kfd_ioctl_dbg_trap_set_flags_args set_flags; + struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event; + struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info; + struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot; + struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot; + }; +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -887,7 +1547,13 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_EXPORT_DMABUF \ AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args) +#define AMDKFD_IOC_RUNTIME_ENABLE \ + AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args) + +#define AMDKFD_IOC_DBG_TRAP \ + AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x25 +#define AMDKFD_COMMAND_END 0x27 #endif -- cgit v1.2.3 From d230f1bfe7a1977565ce1e2804ddb7b7a3d911ff Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 25 Mar 2022 12:39:06 -0400 Subject: drm/amdkfd: display debug capabilities Expose debug capabilities in the KFD topology node's HSA capabilities and debug properties flags. Ensure correct capabilities are exposed based on firmware support. Flag definitions can be referenced in uapi/linux/kfd_sysfs.h. 
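As a hedged illustration of how user space might consume these bits
(the topology sysfs path can vary between systems, and node 1 is just an
example):

    /* Parse "name value" pairs from the node's properties file. */
    FILE *f = fopen("/sys/class/kfd/kfd/topology/nodes/1/properties", "r");
    char name[64];
    unsigned long long val;

    while (f && fscanf(f, "%63s %llu", name, &val) == 2) {
            if (!strcmp(name, "capability") &&
                (val & HSA_CAP_TRAP_DEBUG_SUPPORT))
                    printf("node supports trap debugging\n");
            else if (!strcmp(name, "debug_prop"))
                    printf("debug_prop: 0x%llx\n", val);
    }
    if (f)
            fclose(f);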
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 101 ++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 6 ++ include/uapi/linux/kfd_sysfs.h | 15 +++++ 3 files changed, 117 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 8302d8967158..3def25b2bdbb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -535,6 +535,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd->mec_fw_version); sysfs_show_32bit_prop(buffer, offs, "capability", dev->node_props.capability); + sysfs_show_64bit_prop(buffer, offs, "debug_prop", + dev->node_props.debug_prop); sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", @@ -1857,6 +1859,97 @@ err: return res; } +static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev) +{ + bool firmware_supported = true; + + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { + firmware_supported = + (dev->gpu->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 9; + goto out; + } + + /* + * Note: Any unlisted devices here are assumed to support exception handling. + * Add additional checks here as needed. + */ + switch (KFD_GC_VERSION(dev->gpu)) { + case IP_VERSION(9, 0, 1): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768; + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 459; + break; + case IP_VERSION(9, 4, 1): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 60; + break; + case IP_VERSION(9, 4, 2): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 51; + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 1): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 144; + break; + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + firmware_supported = dev->gpu->kfd->mec_fw_version >= 89; + break; + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 3): + firmware_supported = false; + break; + default: + break; + } + +out: + if (firmware_supported) + dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED; +} + +static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) +{ + dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << + HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & + HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); + + dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT | + HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED | + HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED; + + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { + dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 | + HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 4, 2)) + dev->node_props.debug_prop |= + HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; + else + dev->node_props.capability |= + HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; + } else { + dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | + 
HSA_DBG_WATCH_ADDR_MASK_HI_BIT; + + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(11, 0, 0)) + dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; + else + dev->node_props.capability |= + HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED; + } + + kfd_topology_set_dbg_firmware_support(dev); +} + int kfd_topology_add_device(struct kfd_node *gpu) { uint32_t gpu_id; @@ -1967,13 +2060,11 @@ int kfd_topology_add_device(struct kfd_node *gpu) HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; default: - if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1)) - dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << - HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & - HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); - else + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1)) WARN(1, "Unexpected ASIC family %u", dev->gpu->adev->asic_type); + else + kfd_topology_set_capabilities(dev); } /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index 3b8afb6aba79..cba2cd5ed9d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -31,6 +31,11 @@ #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32 +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX9 6 +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 7 +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT \ + (29 << HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT) + struct kfd_node_properties { uint64_t hive_id; uint32_t cpu_cores_count; @@ -42,6 +47,7 @@ struct kfd_node_properties { uint32_t cpu_core_id_base; uint32_t simd_id_base; uint32_t capability; + uint64_t debug_prop; uint32_t max_waves_per_simd; uint32_t lds_size_in_kb; uint32_t gds_size_in_kb; diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h index 3e330f368917..a51b7331e0b4 100644 --- a/include/uapi/linux/kfd_sysfs.h +++ b/include/uapi/linux/kfd_sysfs.h @@ -43,6 +43,11 @@ #define HSA_CAP_DOORBELL_TYPE_2_0 0x2 #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 +#define HSA_CAP_TRAP_DEBUG_SUPPORT 0x00008000 +#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED 0x00010000 +#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED 0x00020000 +#define HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED 0x00040000 + /* Old buggy user mode depends on this being 0 */ #define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 @@ -53,8 +58,18 @@ #define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 #define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 #define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 +#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED 0x20000000 #define HSA_CAP_RESERVED 0xe00f8000 +/* debug_prop bits in node properties */ +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f +#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT 0 +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK 0x000003f0 +#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT 4 +#define HSA_DBG_DISPATCH_INFO_ALWAYS_VALID 0x00000400 +#define HSA_DBG_WATCHPOINTS_EXCLUSIVE 0x00000800 +#define HSA_DBG_RESERVED 0xfffffffffffff000ull + /* Heap types in memory properties */ #define HSA_MEM_HEAP_TYPE_SYSTEM 0 #define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 -- cgit v1.2.3 From a159afdad2f6b97e4d18549cff2b53d17e68a412 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 10 May 2022 12:51:26 -0400 Subject: drm/amdkfd: bump kfd ioctl minor version for debug api availability Bump the minor version to declare debugging capability is now available. 
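A user-space debugger would typically gate on this, e.g. (sketch only;
kfd_fd is assumed to be an open /dev/kfd handle):

    struct kfd_ioctl_get_version_args ver = {0};

    if (ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &ver) == 0 &&
        (ver.major_version > 1 ||
         (ver.major_version == 1 && ver.minor_version >= 13)))
            debugger_api_available = true;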
Signed-off-by: Jonathan Kim
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 1 -
 include/uapi/linux/kfd_ioctl.h           | 3 ++-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/uapi')

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f54ff5c3387d..cce2abe12e1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -2984,7 +2984,6 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v
 		if (!r)
 			target->exception_enable_mask = args->enable.exception_mask;
 
-		pr_warn("Debug functions limited\n");
 		break;
 	case KFD_IOC_DBG_TRAP_DISABLE:
 		r = kfd_dbg_trap_disable(target);
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 32913d674d38..1781e7669982 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -38,9 +38,10 @@
  * - 1.10 - Add SMI profiler event log
  * - 1.11 - Add unified memory for ctx save/restore area
  * - 1.12 - Add DMA buf export ioctl
+ * - 1.13 - Add debugger API
  */
 #define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 12
+#define KFD_IOCTL_MINOR_VERSION 13
 
 struct kfd_ioctl_get_version_args {
 	__u32 major_version;	/* from KFD */
-- 
cgit v1.2.3

From 6f582513ad15de729ee5c91dfef946f3c266a207 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Wed, 17 May 2023 16:19:51 -0400
Subject: drm/amdkfd: add event age tracking

Add event age tracking.

Signed-off-by: James Zhu
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 include/uapi/linux/kfd_ioctl.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'include/uapi')

diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 1781e7669982..93f1c0bc5caf 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -320,12 +320,20 @@ struct kfd_hsa_hw_exception_data {
 	__u32 gpu_id;
 };
 
+/* hsa signal event data */
+struct kfd_hsa_signal_event_data {
+	__u64 last_event_age;	/* to and from KFD */
+};
+
 /* Event data */
 struct kfd_event_data {
 	union {
+		/* From KFD */
 		struct kfd_hsa_memory_exception_data memory_exception_data;
 		struct kfd_hsa_hw_exception_data hw_exception_data;
-	};				/* From KFD */
+		/* To and From KFD */
+		struct kfd_hsa_signal_event_data signal_event_data;
+	};
 	__u64 kfd_event_data_ext;	/* pointer to an extension structure
 					   for future exception types */
 	__u32 event_id;		/* to KFD */
-- 
cgit v1.2.3

From d297eedf83f5af96751c0da1e4355c19244a55a2 Mon Sep 17 00:00:00 2001
From: James Zhu
Date: Wed, 7 Jun 2023 13:27:40 -0400
Subject: drm/amdkfd: bump kfd ioctl minor version for event age availability

Bump the minor version to declare that the event age tracking feature
is now available.

In the kernel amdgpu driver, kfd_wait_on_events is used to support the
user space signal event wait function. When multiple threads wait on
the same event, a race condition can occur: after a thread has checked
the signal condition but before it calls kfd_wait_on_events, the event
interrupt can fire and wake up the threads already sleeping on this
event. The thread that was between its check and its wait can then go
to sleep without ever being woken up again. Adding event age tracking
in both kernel and user mode helps avoid this race condition.
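A hedged sketch of the resulting wait pattern (argument wiring is
simplified; last_age is the age user space recorded when it last checked
the signal condition):

    struct kfd_event_data ev = {0};
    struct kfd_ioctl_wait_events_args wait = {0};

    ev.event_id = id;
    /* KFD compares this against the event's current age and returns
     * immediately if the event was signaled since the last check,
     * instead of sleeping through the lost wakeup. */
    ev.signal_event_data.last_event_age = last_age;

    wait.events_ptr = (uint64_t)&ev;
    wait.num_events = 1;
    wait.wait_for_all = 0;
    wait.timeout = timeout_ms;

    ret = ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
    last_age = ev.signal_event_data.last_event_age;  /* updated by KFD */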
Proposed ROCT-Thunk-Interface: https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/commit/efdbf6cfbc026bd68ac3c35d00dacf84370eb81e https://github.com/RadeonOpenCompute/ROCT-Thunk-Interface/commit/1820ae0a2db85b6f584611dc0cde1a00e7c22915 Proposed ROCR-Runtime: https://github.com/RadeonOpenCompute/ROCR-Runtime/compare/master...zhums:ROCR-Runtime:new_event_wait_review https://github.com/RadeonOpenCompute/ROCR-Runtime/commit/e1f5bdb88eb882ac798aeca2c00ea3fbb2dba459 https://github.com/RadeonOpenCompute/ROCR-Runtime/commit/7d26afd14107b5c2a754c1a3f415d89f3aabb503 Signed-off-by: James Zhu Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- include/uapi/linux/kfd_ioctl.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 93f1c0bc5caf..eeb2fdcbdcb7 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -39,9 +39,10 @@ * - 1.11 - Add unified memory for ctx save/restore area * - 1.12 - Add DMA buf export ioctl * - 1.13 - Add debugger API + * - 1.14 - Update kfd_event_data */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 13 +#define KFD_IOCTL_MINOR_VERSION 14 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ -- cgit v1.2.3