drm/i915: Refactor sseu helper functions

Move the sseu helper functions into intel_sseu.c/h and remove the inline
qualifier. Additionally, ensure these are all prefixed with intel_sseu_*
to match the convention of other functions in i915.

v2: fix spacing from checkpatch warning
v3: squash helper function changes into a single patch
    break 80 character line to fix checkpatch warning
    move get/set_eus helpers to intel_device_info.c
v4: Remove intel_ prefix from static functions in
    intel_device_info.c and correctly copy changes
    to stride calculation in those functions.

Acked-by: Jani Nikula <jani.nikula@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Stuart Summers <stuart.summers@intel.com>
Signed-off-by: Manasi Navare <manasi.d.navare@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-5-stuart.summers@intel.com
Stuart Summers authored on 2019-05-24 08:40:21 -07:00; committed by Manasi Navare
parent b5ab1abe8d
commit 0040fd19e7
6 changed files with 74 additions and 63 deletions


@@ -8,6 +8,23 @@
 #include "intel_lrc_reg.h"
 #include "intel_sseu.h"
 
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+	unsigned int i, total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
+		total += hweight8(sseu->subslice_mask[i]);
+
+	return total;
+}
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
+{
+	return hweight8(sseu->subslice_mask[slice]);
+}
+
 u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 			 const struct intel_sseu *req_sseu)
 {
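
For reference, a minimal standalone sketch of what the two helpers above compute; the struct layout and example values are made up for illustration, and __builtin_popcount() stands in for the kernel's hweight8():

/*
 * Illustrative sketch, not kernel code: count enabled subslices from a
 * per-slice mask array, mirroring intel_sseu_subslice_total() and
 * intel_sseu_subslices_per_slice().
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SLICES 3

struct sseu_masks {
	uint8_t subslice_mask[MAX_SLICES]; /* one bit per subslice, per slice */
};

static unsigned int subslices_per_slice(const struct sseu_masks *s, uint8_t slice)
{
	return __builtin_popcount(s->subslice_mask[slice]);
}

static unsigned int subslice_total(const struct sseu_masks *s)
{
	unsigned int i, total = 0;

	for (i = 0; i < MAX_SLICES; i++)
		total += subslices_per_slice(s, i);

	return total;
}

int main(void)
{
	/* e.g. slice 0 has subslices 0-2 enabled, slice 1 has 0-1, slice 2 is fused off */
	struct sseu_masks s = { .subslice_mask = { 0x07, 0x03, 0x00 } };

	printf("total subslices: %u\n", subslice_total(&s)); /* prints 5 */
	return 0;
}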


@@ -63,11 +63,11 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu)
 	return value;
 }
 
-static inline unsigned int
-intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice)
-{
-	return hweight8(sseu->subslice_mask[slice]);
-}
+unsigned int
+intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
+
+unsigned int
+intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
 
 u32 intel_sseu_make_rpcs(struct drm_i915_private *i915,
 			 const struct intel_sseu *req_sseu);


@@ -4176,7 +4176,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
 			RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
 	}
 	sseu->eu_total = sseu->eu_per_subslice *
-			 sseu_subslice_total(sseu);
+			 intel_sseu_subslice_total(sseu);
 
 	/* subtract fused off EU(s) from enabled slice(s) */
 	for (s = 0; s < fls(sseu->slice_mask); s++) {
@@ -4200,7 +4200,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
 	seq_printf(m, "  %s Slice Total: %u\n", type,
 		   hweight8(sseu->slice_mask));
 	seq_printf(m, "  %s Subslice Total: %u\n", type,
-		   sseu_subslice_total(sseu));
+		   intel_sseu_subslice_total(sseu));
 	for (s = 0; s < fls(sseu->slice_mask); s++) {
 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
 			   s, intel_sseu_subslices_per_slice(sseu, s));


@@ -386,7 +386,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 		value = i915_cmd_parser_get_version(dev_priv);
 		break;
 	case I915_PARAM_SUBSLICE_TOTAL:
-		value = sseu_subslice_total(sseu);
+		value = intel_sseu_subslice_total(sseu);
 		if (!value)
 			return -ENODEV;
 		break;


@@ -90,7 +90,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
 
 	drm_printf(p, "slice total: %u, mask=%04x\n",
 		   hweight8(sseu->slice_mask), sseu->slice_mask);
-	drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+	drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu));
 	for (s = 0; s < sseu->max_slices; s++) {
 		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
 			   s, intel_sseu_subslices_per_slice(sseu, s),
@@ -114,6 +114,40 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
 		   info->cs_timestamp_frequency_khz);
 }
 
+static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice,
+		       int subslice)
+{
+	int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
+	int slice_stride = sseu->max_subslices * subslice_stride;
+
+	return slice * slice_stride + subslice * subslice_stride;
+}
+
+static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
+			int subslice)
+{
+	int i, offset = sseu_eu_idx(sseu, slice, subslice);
+	u16 eu_mask = 0;
+
+	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+		eu_mask |= ((u16)sseu->eu_mask[offset + i]) <<
+			(i * BITS_PER_BYTE);
+	}
+
+	return eu_mask;
+}
+
+static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
+			 u16 eu_mask)
+{
+	int i, offset = sseu_eu_idx(sseu, slice, subslice);
+
+	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
+		sseu->eu_mask[offset + i] =
+			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
+	}
+}
+
 void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
 				     struct drm_printer *p)
 {
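
For context, a standalone sketch of the byte-stride packing that sseu_eu_idx()/sseu_get_eus()/sseu_set_eus() implement. The SSEU_STRIDE() macro, struct name, and sizes below are illustrative assumptions that mirror what the kernel's GEN_SSEU_STRIDE() is expected to provide (the per-subslice EU count rounded up to whole bytes):

/*
 * Illustrative sketch, not kernel code: pack a 16-bit EU mask per
 * (slice, subslice) into a flat byte array using a computed byte offset.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
/* assumed equivalent of GEN_SSEU_STRIDE(): bytes needed for this many bits */
#define SSEU_STRIDE(bits) (((bits) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)

struct eu_info {
	int max_subslices;
	int max_eus_per_subslice;
	uint8_t eu_mask[32]; /* slice-major, then subslice, then mask bytes */
};

static int eu_idx(const struct eu_info *e, int slice, int subslice)
{
	int subslice_stride = SSEU_STRIDE(e->max_eus_per_subslice);
	int slice_stride = e->max_subslices * subslice_stride;

	return slice * slice_stride + subslice * subslice_stride;
}

static void set_eus(struct eu_info *e, int slice, int subslice, uint16_t mask)
{
	int i, offset = eu_idx(e, slice, subslice);

	for (i = 0; i < SSEU_STRIDE(e->max_eus_per_subslice); i++)
		e->eu_mask[offset + i] = (mask >> (BITS_PER_BYTE * i)) & 0xff;
}

static uint16_t get_eus(const struct eu_info *e, int slice, int subslice)
{
	int i, offset = eu_idx(e, slice, subslice);
	uint16_t mask = 0;

	for (i = 0; i < SSEU_STRIDE(e->max_eus_per_subslice); i++)
		mask |= (uint16_t)e->eu_mask[offset + i] << (BITS_PER_BYTE * i);

	return mask;
}

int main(void)
{
	/* e.g. 4 subslices per slice, 10 EUs each -> 2 mask bytes per subslice */
	struct eu_info e = { .max_subslices = 4, .max_eus_per_subslice = 10 };

	memset(e.eu_mask, 0, sizeof(e.eu_mask));
	set_eus(&e, 1, 2, 0x03ff);                           /* all 10 EUs on */
	printf("0x%04x\n", (unsigned int)get_eus(&e, 1, 2)); /* prints 0x03ff */
	return 0;
}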
@@ -260,9 +294,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * EU in any one subslice may be fused off for die
 	 * recovery.
 	 */
-	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
-				DIV_ROUND_UP(sseu->eu_total,
-					     sseu_subslice_total(sseu)) : 0;
+	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     intel_sseu_subslice_total(sseu)) :
+				0;
 
 	/* No restrictions on Power Gating */
 	sseu->has_slice_pg = 1;
@@ -310,8 +345,9 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * CHV expected to always have a uniform distribution of EU
 	 * across subslices.
 	 */
-	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
-				sseu->eu_total / sseu_subslice_total(sseu) :
+	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+				sseu->eu_total /
+					intel_sseu_subslice_total(sseu) :
 				0;
 	/*
 	 * CHV supports subslice power gating on devices with more than
@@ -319,7 +355,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * more than one EU pair per subslice.
 	 */
 	sseu->has_slice_pg = 0;
-	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
+	sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1;
 	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
 }
 
@@ -393,9 +429,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * recovery. BXT is expected to be perfectly uniform in EU
 	 * distribution.
 	 */
-	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
-				DIV_ROUND_UP(sseu->eu_total,
-					     sseu_subslice_total(sseu)) : 0;
+	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     intel_sseu_subslice_total(sseu)) :
+				0;
 	/*
 	 * SKL+ supports slice power gating on devices with more than
 	 * one slice, and supports EU power gating on devices with
@@ -407,7 +444,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
 	sseu->has_slice_pg =
 		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
 	sseu->has_subslice_pg =
-		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
+		IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1;
 	sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
 	if (IS_GEN9_LP(dev_priv)) {
@@ -496,9 +533,10 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 	 * subslices with the exception that any one EU in any one subslice may
 	 * be fused off for die recovery.
 	 */
-	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
-				DIV_ROUND_UP(sseu->eu_total,
-					     sseu_subslice_total(sseu)) : 0;
+	sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ?
+				DIV_ROUND_UP(sseu->eu_total,
+					     intel_sseu_subslice_total(sseu)) :
+				0;
 
 	/*
 	 * BDW supports slice power gating on devices with more than

@@ -218,50 +218,6 @@ struct intel_driver_caps {
 	bool has_logical_contexts:1;
 };
 
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
-	unsigned int i, total = 0;
-
-	for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++)
-		total += hweight8(sseu->subslice_mask[i]);
-
-	return total;
-}
-
-static inline int sseu_eu_idx(const struct sseu_dev_info *sseu,
-			      int slice, int subslice)
-{
-	int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice);
-	int slice_stride = sseu->max_subslices * subslice_stride;
-
-	return slice * slice_stride + subslice * subslice_stride;
-}
-
-static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu,
-			       int slice, int subslice)
-{
-	int i, offset = sseu_eu_idx(sseu, slice, subslice);
-	u16 eu_mask = 0;
-
-	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
-		eu_mask |= ((u16) sseu->eu_mask[offset + i]) <<
-			   (i * BITS_PER_BYTE);
-	}
-
-	return eu_mask;
-}
-
-static inline void sseu_set_eus(struct sseu_dev_info *sseu,
-				int slice, int subslice, u16 eu_mask)
-{
-	int i, offset = sseu_eu_idx(sseu, slice, subslice);
-
-	for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) {
-		sseu->eu_mask[offset + i] =
-			(eu_mask >> (BITS_PER_BYTE * i)) & 0xff;
-	}
-}
-
 const char *intel_platform_name(enum intel_platform platform);
 
 void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);