Merge tag 'drm-misc-next-2023-08-10' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v6.6:

UAPI Changes:

 * nouveau:
   * Provide several GETPARAM ioctls
   * Provide VM_BIND ioctls
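
The VM_BIND/EXEC uAPI lands in include/uapi/drm/nouveau_drm.h (documented
under Documentation/gpu/driver-uapi.rst, see the doc hunk further below). As a
rough, non-authoritative sketch of how userspace drives it (the struct layout
and field names here are assumptions from memory and should be checked against
the header), mapping a buffer object at a userspace-chosen GPU virtual address
looks something like this:

    /* Hypothetical sketch; verify all names against nouveau_drm.h. */
    #include <stdint.h>
    #include <xf86drm.h>          /* libdrm ioctl wrapper */
    #include <drm/nouveau_drm.h>  /* include path may differ per setup */

    static int map_bo(int fd, uint32_t bo_handle, uint64_t va, uint64_t size)
    {
            struct drm_nouveau_vm_bind_op op = {
                    .op        = DRM_NOUVEAU_VM_BIND_OP_MAP, /* assumed name */
                    .handle    = bo_handle,
                    .addr      = va,        /* VA picked by userspace */
                    .bo_offset = 0,
                    .range     = size,
            };
            struct drm_nouveau_vm_bind bind = {
                    .op_count = 1,
                    .op_ptr   = (uintptr_t)&op, /* assumed field name */
            };

            /* The VM must have been set up first via DRM_IOCTL_NOUVEAU_VM_INIT. */
            return drmIoctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
    }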

Cross-subsystem Changes:

 * fbdev: Convert many drivers to fbdev I/O-memory helpers

 * media/vivid: Convert to fbdev I/O-memory helpers

 * vfio-dev/mdpy-fb: Convert to fbdev I/O-memory helpers
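
All three conversions above move drivers with I/O-memory framebuffers onto the
shared fbdev I/O helpers instead of open-coded read/write/mmap paths. A minimal
sketch of what such a conversion typically ends up looking like (helper and
macro names such as fb_io_read(), FB_DEFAULT_IOMEM_OPS and the FB_IO_HELPERS
Kconfig symbol are cited from memory and worth double-checking in the tree):

    /* Kconfig: the driver selects the shared helpers, e.g.
     *     select FB_IO_HELPERS
     */
    #include <linux/fb.h>
    #include <linux/module.h>

    static const struct fb_ops foo_fb_ops = {     /* "foo" is a placeholder */
            .owner = THIS_MODULE,
            /* Expands to .fb_read/.fb_write/.fb_mmap wired to the fb_io_*()
             * helpers plus the cfb_*() drawing ops, replacing per-driver copies. */
            FB_DEFAULT_IOMEM_OPS,
            .fb_setcolreg = foo_setcolreg,        /* driver-specific as before */
    };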

Core Changes:

 * Documentation fixes

 * Do not select framebuffer console for fbdev emulation, fixes Kconfig
   dependencies

 * exec:
   * Add test cases for calling drm_exec() multiple times; see the sketch
     after this list
   * Fix memory leak in selftests
   * Build fixes

 * gem:
   * Fix lockdep checking

 * ttm:
   * Add KUnit tests
   * Cleanups
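
As context for the drm_exec test work above, the calling pattern those tests
exercise looks roughly like the following sketch (API as of this merge window;
see drivers/gpu/drm/drm_exec.c for the authoritative interface):

    #include <drm/drm_exec.h>
    #include <drm/drm_gem.h>

    /* Sketch: lock one GEM object via the drm_exec retry loop. */
    static int foo_lock_obj(struct drm_gem_object *obj)  /* "foo" is a placeholder */
    {
            struct drm_exec exec;
            int ret;

            drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
            drm_exec_until_all_locked(&exec) {
                    /* Locks obj and reserves one dma-fence slot; on contention
                     * all locks are dropped and the loop body runs again. */
                    ret = drm_exec_prepare_obj(&exec, obj, 1);
                    drm_exec_retry_on_contention(&exec);
                    if (ret)
                            break;
            }

            /* ... work against the locked object would go here ... */

            drm_exec_fini(&exec);
            return ret;
    }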

Driver Changes:

 * atmel-hlcdc:
   * Support inverted pixclock polarity, required by several SoCs

 * bridge:
   * dw-hdmi: Update EDID on HDMI detection
   * sitronix-st7789v: Support panel orientation; Support rotation
                       property; Add support for Jasonic
                       JT240MHQS-HWT-EK-E3 plus DT bindings; Minor
                       fixes

 * ivpu:
   * Support VPU4
   * Refactorings

 * loongson:
   * Fixes

 * mcde:
   * Cleanups

 * nouveau:
   * Track GPU virtual memory via DRM GPUVA manager, enables Vulkan
     sparse binding/residency

 * panfrost:
   * Fix synchronization in IRQ handling

 * tve200:
   * Cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230810084505.GA14039@linux-uq9g
Dave Airlie 2023-08-11 16:28:12 +10:00
commit e6b17f5ce9
128 changed files with 7552 additions and 1234 deletions

View file

@ -18,6 +18,7 @@ properties:
enum:
- edt,et028013dma
- inanbo,t28cp45tn89-v17
- jasonic,jt240mhqs-hwt-ek-e3
- sitronix,st7789v
reg: true
@ -25,6 +26,7 @@ properties:
power-supply: true
backlight: true
port: true
rotation: true
spi-cpha: true
spi-cpol: true
@ -58,6 +60,7 @@ examples:
reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
backlight = <&pwm_bl>;
power-supply = <&power>;
rotation = <180>;
spi-max-frequency = <100000>;
spi-cpol;
spi-cpha;

View file

@ -677,6 +677,8 @@ patternProperties:
description: iWave Systems Technologies Pvt. Ltd.
"^jadard,.*":
description: Jadard Technology Inc.
"^jasonic,.*":
description: Jasonic Technology Ltd.
"^jdi,.*":
description: Japan Display Inc.
"^jedec,.*":

View file

@ -6,3 +6,14 @@ drm/i915 uAPI
=============
.. kernel-doc:: include/uapi/drm/i915_drm.h
drm/nouveau uAPI
================
VM_BIND / EXEC uAPI
-------------------
.. kernel-doc:: drivers/gpu/drm/nouveau/nouveau_exec.c
:doc: Overview
.. kernel-doc:: include/uapi/drm/nouveau_drm.h

View file

@ -520,7 +520,7 @@ DRM Cache Handling and Fast WC memcpy()
.. _drm_sync_objects:
DRM Sync Objects
===========================
================
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:doc: Overview

View file

@ -7,7 +7,8 @@ intel_vpu-y := \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
ivpu_hw_mtl.o \
ivpu_hw_37xx.o \
ivpu_hw_40xx.o \
ivpu_ipc.o \
ivpu_job.o \
ivpu_jsm_msg.o \

View file

@ -115,6 +115,22 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
kref_put(&file_priv->ref, file_priv_release);
}
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
switch (args->index) {
case DRM_IVPU_CAP_METRIC_STREAMER:
args->value = 0;
break;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
args->value = 1;
break;
default:
return -EINVAL;
}
return 0;
}
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
@ -144,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_get_context_count(vdev);
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
args->value = vdev->hw->ranges.user_low.start;
args->value = vdev->hw->ranges.user.start;
break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority;
@ -174,6 +190,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_SKU:
args->value = vdev->hw->sku;
break;
case DRM_IVPU_PARAM_CAPABILITIES:
ret = ivpu_get_capabilities(vdev, args);
break;
default:
ret = -EINVAL;
break;
@ -443,8 +462,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
/* VPU MTL does not require PCI spec 10m D3hot delay */
if (ivpu_is_mtl(vdev))
/* VPU 37XX does not require 10m D3hot delay */
if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
@ -482,8 +501,13 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (!vdev->pm)
return -ENOMEM;
vdev->hw->ops = &ivpu_hw_mtl_ops;
vdev->hw->dma_bits = 38;
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
vdev->hw->ops = &ivpu_hw_40xx_ops;
vdev->hw->dma_bits = 48;
} else {
vdev->hw->ops = &ivpu_hw_37xx_ops;
vdev->hw->dma_bits = 38;
}
vdev->platform = IVPU_PLATFORM_INVALID;
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
@ -610,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

View file

@ -23,6 +23,10 @@
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
#define IVPU_HW_40XX 40
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
/* SSID 1 is used by the VPU to represent invalid context */
@ -76,6 +80,7 @@ struct ivpu_wa_table {
bool clear_runtime_mem;
bool d3hot_after_power_off;
bool interrupt_clear_with_0;
bool disable_clock_relinquish;
};
struct ivpu_hw_info;
@ -146,11 +151,6 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
}
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
return to_pci_dev(vdev->drm.dev)->revision;
@ -161,6 +161,19 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
return to_pci_dev(vdev->drm.dev)->device;
}
static inline int ivpu_hw_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
return IVPU_HW_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
default:
ivpu_err(vdev, "Unknown VPU device\n");
return 0;
}
}
static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
{
return container_of(dev, struct ivpu_device, drm);

View file

@ -43,12 +43,20 @@ static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
/* TODO: Remove mtl_vpu.bin from names after transition to generation based FW names */
static struct {
int gen;
const char *name;
} fw_names[] = {
{ IVPU_HW_37XX, "vpu_37xx.bin" },
{ IVPU_HW_37XX, "mtl_vpu.bin" },
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
{ IVPU_HW_40XX, "vpu_40xx.bin" },
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
};
static int ivpu_fw_request(struct ivpu_device *vdev)
{
static const char * const fw_names[] = {
"mtl_vpu.bin",
"intel/vpu/mtl_vpu_v0.0.bin"
};
int ret = -ENOENT;
int i;
@ -60,9 +68,12 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
}
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
if (fw_names[i].gen != ivpu_hw_gen(vdev))
continue;
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
if (!ret) {
vdev->fw->name = fw_names[i];
vdev->fw->name = fw_names[i].name;
return 0;
}
}
@ -195,7 +206,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL;
}
ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0;
}
@ -236,7 +247,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
}
if (fw->shave_nn_size) {
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
@ -434,9 +445,9 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
vdev->hw->ranges.global_low.start;
boot_params->shared_region_base = vdev->hw->ranges.global.start;
boot_params->shared_region_size = vdev->hw->ranges.global.end -
vdev->hw->ranges.global.start;
boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
@ -444,10 +455,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
boot_params->global_aliased_pio_base =
vdev->hw->ranges.global_aliased_pio.start;
boot_params->global_aliased_pio_size =
ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;
@ -455,7 +464,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
/* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);
if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;

View file

@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
int ret;
if (!range) {
if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
range = &vdev->hw->ranges.user_high;
if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
range = &vdev->hw->ranges.shave;
else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
range = &vdev->hw->ranges.dma;
else
range = &vdev->hw->ranges.user_low;
range = &vdev->hw->ranges.user;
}
mutex_lock(&ctx->lock);
@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
fixed_range.end = vpu_addr + size;
range = &fixed_range;
} else {
range = &vdev->hw->ranges.global_low;
range = &vdev->hw->ranges.global;
}
bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);

View file

@ -38,11 +38,10 @@ struct ivpu_addr_range {
struct ivpu_hw_info {
const struct ivpu_hw_ops *ops;
struct {
struct ivpu_addr_range global_low;
struct ivpu_addr_range global_high;
struct ivpu_addr_range user_low;
struct ivpu_addr_range user_high;
struct ivpu_addr_range global_aliased_pio;
struct ivpu_addr_range global;
struct ivpu_addr_range user;
struct ivpu_addr_range shave;
struct ivpu_addr_range dma;
} ranges;
struct {
u8 min_ratio;
@ -60,7 +59,8 @@ struct ivpu_hw_info {
int dma_bits;
};
extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
{

View file

@ -5,7 +5,7 @@
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_hw_mtl_reg.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
@ -39,34 +39,34 @@
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_0_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_1_IRQ_MASK ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
(REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
(REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
static char *ivpu_platform_to_str(u32 platform)
{
@ -84,8 +84,8 @@ static char *ivpu_platform_to_str(u32 platform)
static void ivpu_hw_read_platform(struct ivpu_device *vdev)
{
u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
u32 gen_ctrl = REGV_RD32(VPU_37XX_HOST_SS_GEN_CTRL);
u32 platform = REG_GET_FLD(VPU_37XX_HOST_SS_GEN_CTRL, PS, gen_ctrl);
if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
vdev->platform = platform;
@ -123,7 +123,7 @@ static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
{
return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}
/* Send KMD initiated workpoint change */
@ -139,23 +139,23 @@ static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ra
return ret;
}
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0, val);
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1, val);
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2);
val = REG_SET_FLD_NUM(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2, val);
val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_WP_REQ_CMD);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_WP_REQ_CMD, SEND, val);
REGB_WR32(VPU_37XX_BUTTRESS_WP_REQ_CMD, val);
ret = ivpu_pll_wait_for_cmd_send(vdev);
if (ret)
@ -171,7 +171,7 @@ static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
@ -179,7 +179,7 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
return REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
@ -188,21 +188,21 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
u32 fmin_fuse, fmax_fuse;
fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmin_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMIN_FUSE);
fuse_min_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
fuse_pn_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
fmax_fuse = REGB_RD32(VPU_37XX_BUTTRESS_FMAX_FUSE);
fuse_max_ratio = REG_GET_FLD(VPU_37XX_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}
static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
static int ivpu_hw_37xx_wait_for_vpuip_bar(struct ivpu_device *vdev)
{
return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
@ -248,7 +248,7 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
return ret;
}
ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
ret = ivpu_hw_37xx_wait_for_vpuip_bar(vdev);
if (ret) {
ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
return ret;
@ -272,52 +272,52 @@ static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
{
u32 val = 0;
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);
if (enable) {
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
}
REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);
if (enable) {
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
} else {
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
}
REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@ -325,9 +325,9 @@ static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);
if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@ -335,9 +335,9 @@ static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);
if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
if (!REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
return -EIO;
return 0;
@ -385,7 +385,7 @@ static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
{
REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
}
static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
@ -393,12 +393,12 @@ static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
if (ret) {
@ -453,26 +453,26 @@ static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
@ -481,32 +481,32 @@ static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 ex
if (ivpu_is_fpga(vdev))
return 0;
return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
}
static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
{
u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);
if (enable)
val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
else
val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
@ -538,25 +538,25 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
@ -576,10 +576,10 @@ static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
val = vdev->fw->entry_point >> 9;
REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);
ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
@ -590,27 +590,27 @@ static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
int ret;
u32 val;
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
return ret;
}
val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL);
if (enable)
val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
else
val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
val = REG_CLR_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, val);
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
return ret;
}
static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
@ -620,16 +620,15 @@ static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
ivpu_pll_init_frequency_ratios(vdev);
ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
hw->ranges.global_aliased_pio = hw->ranges.user_low;
ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);
return 0;
}
static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret;
u32 val;
@ -637,24 +636,24 @@ static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
if (IVPU_WA(punit_disabled))
return 0;
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret) {
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
return ret;
}
val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
if (ret)
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
return ret;
}
static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
static int ivpu_hw_37xx_d0i3_enable(struct ivpu_device *vdev)
{
int ret;
@ -667,7 +666,7 @@ static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
static int ivpu_hw_37xx_d0i3_disable(struct ivpu_device *vdev)
{
int ret;
@ -678,7 +677,7 @@ static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
@ -686,11 +685,11 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
ivpu_hw_wa_init(vdev);
ivpu_hw_timeouts_init(vdev);
ret = ivpu_hw_mtl_reset(vdev);
ret = ivpu_hw_37xx_reset(vdev);
if (ret)
ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
ret = ivpu_hw_mtl_d0i3_disable(vdev);
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
@ -732,7 +731,7 @@ static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
return ret;
}
static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
static int ivpu_hw_37xx_boot_fw(struct ivpu_device *vdev)
{
ivpu_boot_no_snoop_enable(vdev);
ivpu_boot_tbu_mmu_enable(vdev);
@ -741,32 +740,31 @@ static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
return 0;
}
static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
static bool ivpu_hw_37xx_is_idle(struct ivpu_device *vdev)
{
u32 val;
if (IVPU_WA(punit_disabled))
return true;
val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_STATUS);
return REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, READY, val) &&
REG_TEST_FLD(VPU_37XX_BUTTRESS_VPU_STATUS, IDLE, val);
}
static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
static int ivpu_hw_37xx_power_down(struct ivpu_device *vdev)
{
int ret = 0;
if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
if (!ivpu_hw_37xx_is_idle(vdev) && ivpu_hw_37xx_reset(vdev))
ivpu_err(vdev, "Failed to reset the VPU\n");
}
if (ivpu_pll_disable(vdev)) {
ivpu_err(vdev, "Failed to disable PLL\n");
ret = -EIO;
}
if (ivpu_hw_mtl_d0i3_enable(vdev)) {
if (ivpu_hw_37xx_d0i3_enable(vdev)) {
ivpu_err(vdev, "Failed to enter D0I3\n");
ret = -EIO;
}
@ -774,7 +772,7 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
return ret;
}
static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_wdt_disable(struct ivpu_device *vdev)
{
u32 val;
@ -792,7 +790,7 @@ static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
}
static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
{
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
u32 cpu_clock;
@ -806,35 +804,35 @@ static u32 ivpu_hw_mtl_pll_to_freq(u32 ratio, u32 config)
}
/* Register indirect accesses */
static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
{
u32 pll_curr_ratio;
pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
pll_curr_ratio = REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL);
pll_curr_ratio &= VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK;
if (!ivpu_is_silicon(vdev))
return PLL_SIMULATION_FREQ;
return ivpu_hw_mtl_pll_to_freq(pll_curr_ratio, vdev->hw->config);
return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
}
static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
{
return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
}
static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_telemetry_size_get(struct ivpu_device *vdev)
{
return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE);
}
static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_telemetry_enable_get(struct ivpu_device *vdev)
{
return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
return REGB_RD32(VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE);
}
static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
static void ivpu_hw_37xx_reg_db_set(struct ivpu_device *vdev, u32 db_id)
{
u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
@ -842,52 +840,52 @@ static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
}
static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
{
return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
}
static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
static u32 ivpu_hw_37xx_reg_ipc_rx_count_get(struct ivpu_device *vdev)
{
u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
u32 count = REGV_RD32_SILENT(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
}
static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
static void ivpu_hw_37xx_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_clear(struct ivpu_device *vdev)
{
REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
}
static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_enable(struct ivpu_device *vdev)
{
REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
}
static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
{
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_37XX_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
}
static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
@ -895,7 +893,7 @@ static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
ivpu_pm_schedule_recovery(vdev);
}
static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
{
ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
@ -903,65 +901,66 @@ static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
}
/* Handler for IRQs from VPU core (irqV) */
static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
static u32 ivpu_hw_37xx_irqv_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
ivpu_mmu_irq_evtq_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
ivpu_ipc_irq_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
ivpu_mmu_irq_gerr_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
ivpu_hw_37xx_irq_wdt_mss_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
ivpu_hw_37xx_irq_wdt_nce_handler(vdev);
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
ivpu_hw_37xx_irq_noc_firewall_handler(vdev);
return status;
}
/* Handler for IRQs from Buttress core (irqB) */
static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
{
u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
u32 status = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
bool schedule_recovery = false;
if (status == 0)
return 0;
/* Disable global interrupt before handling local buttress interrupts */
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
REGB_WR32(VPU_37XX_BUTTRESS_ATS_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
REGB_WR32(VPU_37XX_BUTTRESS_UFI_ERR_CLEAR, 0x1);
schedule_recovery = true;
}
@ -971,12 +970,12 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
* Writing 1 triggers an interrupt, so we can't perform read update write.
* Clear local interrupt status by writing 0 to all bits.
*/
REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0);
else
REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, status);
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
/* Re-enable global interrupt */
REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@ -984,65 +983,65 @@ static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
return status;
}
static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
{
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
static void ivpu_hw_37xx_diagnose_failure(struct ivpu_device *vdev)
{
u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
u32 irqv = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
u32 irqb = REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
if (ivpu_hw_37xx_reg_ipc_rx_count_get(vdev))
ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
ivpu_err(vdev, "WDT MSS timeout detected\n");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
ivpu_err(vdev, "WDT NCE timeout detected\n");
if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
ivpu_err(vdev, "NOC Firewall irq detected\n");
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_37XX_BUTTRESS_ATS_ERR_LOG_0));
if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
u32 ufi_log = REGB_RD32(VPU_37XX_BUTTRESS_UFI_ERR_LOG);
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
ufi_log, REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
REG_GET_FLD(VPU_37XX_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
}
}
const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
.info_init = ivpu_hw_mtl_info_init,
.power_up = ivpu_hw_mtl_power_up,
.is_idle = ivpu_hw_mtl_is_idle,
.power_down = ivpu_hw_mtl_power_down,
.boot_fw = ivpu_hw_mtl_boot_fw,
.wdt_disable = ivpu_hw_mtl_wdt_disable,
.diagnose_failure = ivpu_hw_mtl_diagnose_failure,
.reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
.reg_db_set = ivpu_hw_mtl_reg_db_set,
.reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
.reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
.reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
.irq_clear = ivpu_hw_mtl_irq_clear,
.irq_enable = ivpu_hw_mtl_irq_enable,
.irq_disable = ivpu_hw_mtl_irq_disable,
.irq_handler = ivpu_hw_mtl_irq_handler,
const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.info_init = ivpu_hw_37xx_info_init,
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
.power_down = ivpu_hw_37xx_power_down,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
.reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
.reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
.reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
.reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
.reg_db_set = ivpu_hw_37xx_reg_db_set,
.reg_ipc_rx_addr_get = ivpu_hw_37xx_reg_ipc_rx_addr_get,
.reg_ipc_rx_count_get = ivpu_hw_37xx_reg_ipc_rx_count_get,
.reg_ipc_tx_set = ivpu_hw_37xx_reg_ipc_tx_set,
.irq_clear = ivpu_hw_37xx_irq_clear,
.irq_enable = ivpu_hw_37xx_irq_enable,
.irq_disable = ivpu_hw_37xx_irq_disable,
.irq_handler = ivpu_hw_37xx_irq_handler,
};

View file

@ -0,0 +1,281 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_MTL_REG_H__
#define __IVPU_HW_MTL_REG_H__
#include <linux/bits.h>
#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_37XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_37XX_HOST_SS_CPR_RST_SET 0x00000094u
#define VPU_37XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_37XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_37XX_HOST_SS_CPR_RST_CLR 0x00000098u
#define VPU_37XX_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define VPU_37XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
#define VPU_37XX_HOST_SS_HW_VERSION 0x00000108u
#define VPU_37XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
#define VPU_37XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
#define VPU_37XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
#define VPU_37XX_HOST_SS_GEN_CTRL 0x00000118u
#define VPU_37XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
#define VPU_37XX_HOST_SS_NOC_QREQN 0x00000154u
#define VPU_37XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_NOC_QACCEPTN 0x00000158u
#define VPU_37XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_NOC_QDENY 0x0000015cu
#define VPU_37XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
#define VPU_37XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
#define VPU_37XX_HOST_SS_ICB_STATUS_0 0x00010210u
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
#define VPU_37XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
#define VPU_37XX_HOST_SS_ICB_STATUS_1 0x00010214u
#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
#define VPU_37XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_SS_ICB_CLEAR_0 0x00010220u
#define VPU_37XX_HOST_SS_ICB_CLEAR_1 0x00010224u
#define VPU_37XX_HOST_SS_ICB_ENABLE_0 0x00010240u
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
#define VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
#define VPU_37XX_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
#define VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define VPU_37XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
#define VPU_37XX_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define VPU_37XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define VPU_37XX_HOST_MMU_IDR0 0x00200000u
#define VPU_37XX_HOST_MMU_IDR1 0x00200004u
#define VPU_37XX_HOST_MMU_IDR3 0x0020000cu
#define VPU_37XX_HOST_MMU_IDR5 0x00200014u
#define VPU_37XX_HOST_MMU_CR0 0x00200020u
#define VPU_37XX_HOST_MMU_CR0ACK 0x00200024u
#define VPU_37XX_HOST_MMU_CR1 0x00200028u
#define VPU_37XX_HOST_MMU_CR2 0x0020002cu
#define VPU_37XX_HOST_MMU_IRQ_CTRL 0x00200050u
#define VPU_37XX_HOST_MMU_IRQ_CTRLACK 0x00200054u
#define VPU_37XX_HOST_MMU_GERROR 0x00200060u
#define VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
#define VPU_37XX_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
#define VPU_37XX_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
#define VPU_37XX_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define VPU_37XX_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define VPU_37XX_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define VPU_37XX_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
#define VPU_37XX_HOST_MMU_GERRORN 0x00200064u
#define VPU_37XX_HOST_MMU_STRTAB_BASE 0x00200080u
#define VPU_37XX_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
#define VPU_37XX_HOST_MMU_CMDQ_BASE 0x00200090u
#define VPU_37XX_HOST_MMU_CMDQ_PROD 0x00200098u
#define VPU_37XX_HOST_MMU_CMDQ_CONS 0x0020009cu
#define VPU_37XX_HOST_MMU_EVTQ_BASE 0x002000a0u
#define VPU_37XX_HOST_MMU_EVTQ_PROD 0x002000a8u
#define VPU_37XX_HOST_MMU_EVTQ_CONS 0x002000acu
#define VPU_37XX_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define VPU_37XX_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
#define VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
#define VPU_37XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
#endif /* __IVPU_HW_MTL_REG_H__ */
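Each register above is described by an offset define plus one <REG>_<FIELD>_MASK define per bit-field, so values can be packed and unpacked with the generic helpers from <linux/bitfield.h>. The snippet below is an illustration only, not the driver's code: it assumes a hypothetical ioremap()'ed 'buttress' base and uses raw readl_poll_timeout(), whereas the driver itself goes through wrapper macros such as the REGV_RD32()/REGV_WR32() calls visible in the ivpu_mmu.c hunks further down.

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Illustration only: wait for the READY bit of the 37XX buttress VPU_STATUS
 * register. 'buttress' is an assumed MMIO mapping of the buttress block.
 */
static int example_wait_vpu_ready(void __iomem *buttress)
{
	u32 val;

	return readl_poll_timeout(buttress + VPU_37XX_BUTTRESS_VPU_STATUS, val,
				  FIELD_GET(VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK, val),
				  10, 100000 /* 100 ms, arbitrary */);
}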

File diff suppressed because it is too large.


@ -0,0 +1,267 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_40XX_REG_H__
#define __IVPU_HW_40XX_REG_H__
#include <linux/bits.h>
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_CLK_EN_CSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_CLK_SET 0x00000084u
#define VPU_40XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_RST_EN 0x00000090u
#define VPU_40XX_HOST_SS_CPR_RST_EN_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_RST_EN_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_RST_EN_CSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_RST_SET 0x00000094u
#define VPU_40XX_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_CPR_RST_CLR 0x00000098u
#define VPU_40XX_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define VPU_40XX_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
#define VPU_40XX_HOST_SS_HW_VERSION 0x00000108u
#define VPU_40XX_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
#define VPU_40XX_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
#define VPU_40XX_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
#define VPU_40XX_HOST_SS_SW_VERSION 0x0000010cu
#define VPU_40XX_HOST_SS_GEN_CTRL 0x00000118u
#define VPU_40XX_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
#define VPU_40XX_HOST_SS_NOC_QREQN 0x00000154u
#define VPU_40XX_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_NOC_QACCEPTN 0x00000158u
#define VPU_40XX_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_NOC_QDENY 0x0000015cu
#define VPU_40XX_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QREQN 0x00000160u
#define VPU_40XX_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
#define VPU_40XX_TOP_NOC_QACCEPTN 0x00000164u
#define VPU_40XX_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(2)
#define VPU_40XX_TOP_NOC_QDENY 0x00000168u
#define VPU_40XX_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
#define VPU_40XX_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
#define VPU_40XX_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
#define VPU_40XX_HOST_SS_ICB_STATUS_0 0x00010210u
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
#define VPU_40XX_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
#define VPU_40XX_HOST_SS_ICB_STATUS_1 0x00010214u
#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
#define VPU_40XX_HOST_SS_ICB_CLEAR_0 0x00010220u
#define VPU_40XX_HOST_SS_ICB_CLEAR_1 0x00010224u
#define VPU_40XX_HOST_SS_ICB_ENABLE_0 0x00010240u
#define VPU_40XX_HOST_SS_ICB_ENABLE_1 0x00010244u
#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
#define VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
#define VPU_40XX_HOST_SS_AON_PWR_ISO_EN0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define VPU_40XX_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_SNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AW_SNOOP_OVERRIDE_MASK BIT_MASK(4)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_AR_SNOOP_OVERRIDE_MASK BIT_MASK(5)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
#define VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV 0x00360004u
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
#define VPU_40XX_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
#define VPU_40XX_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
#define VPU_40XX_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
#define VPU_40XX_CPU_SS_TIM_WATCHDOG 0x0102009cu
#define VPU_40XX_CPU_SS_TIM_WDOG_EN 0x010200a4u
#define VPU_40XX_CPU_SS_TIM_SAFE 0x010200a8u
#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG 0x01021008u
#define VPU_40XX_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
#define VPU_40XX_CPU_SS_CPR_NOC_QREQN 0x01010030u
#define VPU_40XX_CPU_SS_CPR_NOC_QREQN_TOP_MMIO_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN 0x01010034u
#define VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN_TOP_MMIO_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_CPR_NOC_QDENY 0x01010038u
#define VPU_40XX_CPU_SS_CPR_NOC_QDENY_TOP_MMIO_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_TIM_IPC_FIFO 0x010200f0u
#define VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT 0x01029008u
#define VPU_40XX_CPU_SS_DOORBELL_0 0x01300000u
#define VPU_40XX_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
#define VPU_40XX_CPU_SS_DOORBELL_1 0x01301000u
#endif /* __IVPU_HW_40XX_REG_H__ */
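Judging by the field names alone, the WP_REQ_* registers form a small mailbox for workpoint (PLL ratio) requests: the payload registers carry min/max/target ratios plus EPP, config and cdyn values, and the SEND bit in WP_REQ_CMD kicks the request. The sketch below is an assumption built on that reading; the ratio/EPP values and the 'buttress' mapping are placeholders, and the real driver derives its ratios from the FMIN/FMAX fuse registers and uses its own register I/O helpers.

#include <linux/bitfield.h>
#include <linux/io.h>

/* Assumed workpoint-request sequence, inferred from the register names above. */
static void example_send_workpoint_request(void __iomem *buttress,
					   u16 min_ratio, u16 max_ratio,
					   u16 target_ratio, u16 epp)
{
	u32 val;

	val = FIELD_PREP(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK, min_ratio) |
	      FIELD_PREP(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK, max_ratio);
	writel(val, buttress + VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0);

	val = FIELD_PREP(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK, target_ratio) |
	      FIELD_PREP(VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK, epp);
	writel(val, buttress + VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1);

	/* Assumption: hardware latches the payload registers when SEND is set. */
	writel(FIELD_PREP(VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK, 1),
	       buttress + VPU_40XX_BUTTRESS_WP_REQ_CMD);
}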


@ -1,281 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020-2023 Intel Corporation
*/
#ifndef __IVPU_HW_MTL_REG_H__
#define __IVPU_HW_MTL_REG_H__
#include <linux/bits.h>
#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u
#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u
#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u
#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u
#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u
#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu
#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u
#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u
#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u
#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u
#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
#define MTL_VPU_HOST_MMU_IDR0 0x00200000u
#define MTL_VPU_HOST_MMU_IDR1 0x00200004u
#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu
#define MTL_VPU_HOST_MMU_IDR5 0x00200014u
#define MTL_VPU_HOST_MMU_CR0 0x00200020u
#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u
#define MTL_VPU_HOST_MMU_CR1 0x00200028u
#define MTL_VPU_HOST_MMU_CR2 0x0020002cu
#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u
#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u
#define MTL_VPU_HOST_MMU_GERROR 0x00200060u
#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u
#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u
#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u
#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u
#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu
#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u
#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u
#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu
#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
#endif /* __IVPU_HW_MTL_REG_H__ */


@ -7,7 +7,7 @@
#include <linux/highmem.h>
#include "ivpu_drv.h"
#include "ivpu_hw_mtl_reg.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
@ -186,13 +186,13 @@
#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
(REG_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT)))
static char *ivpu_mmu_event_to_str(u32 cmd)
{
@ -250,15 +250,15 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR0_REF;
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR0);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR1);
if (val != IVPU_MMU_IDR1_REF)
ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR3);
if (val != IVPU_MMU_IDR3_REF)
ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
@ -269,7 +269,7 @@ static void ivpu_mmu_config_check(struct ivpu_device *vdev)
else
val_ref = IVPU_MMU_IDR5_REF;
val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
val = REGV_RD32(VPU_37XX_HOST_MMU_IDR5);
if (val != val_ref)
ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
}
@ -396,18 +396,18 @@ static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
int ret;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, 0);
if (ret)
return ret;
return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_IRQ_CTRL, irq_ctrl);
}
static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
{
struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
IVPU_MMU_QUEUE_TIMEOUT_US);
}
@ -447,7 +447,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
return ret;
clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, q->prod);
ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
if (ret)
@ -495,7 +495,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, 0);
if (ret)
return ret;
@ -505,17 +505,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
REGV_WR32(VPU_37XX_HOST_MMU_CR1, val);
REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(VPU_37XX_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
REGV_WR64(VPU_37XX_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_PROD, 0);
REGV_WR32(VPU_37XX_HOST_MMU_CMDQ_CONS, 0);
val = IVPU_MMU_CR0_CMDQEN;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@ -531,17 +531,17 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
if (ret)
return ret;
REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
REGV_WR64(VPU_37XX_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC, 0);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, 0);
val |= IVPU_MMU_CR0_EVTQEN;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
val |= IVPU_MMU_CR0_ATSCHK;
ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
ret = ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
if (ret)
return ret;
@ -550,7 +550,7 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
return ret;
val |= IVPU_MMU_CR0_SMMUEN;
return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
return ivpu_mmu_reg_write(vdev, VPU_37XX_HOST_MMU_CR0, val);
}
static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
@ -801,14 +801,14 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
evtq->prod = REGV_RD32(VPU_37XX_HOST_MMU_EVTQ_PROD_SEC);
if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
return NULL;
clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
REGV_WR32(VPU_37XX_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
return evt;
}
@ -841,35 +841,35 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
ivpu_dbg(vdev, IRQ, "MMU error\n");
gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
gerror_val = REGV_RD32(VPU_37XX_HOST_MMU_GERROR);
gerrorn_val = REGV_RD32(VPU_37XX_HOST_MMU_GERRORN);
active = gerror_val ^ gerrorn_val;
if (!(active & IVPU_MMU_GERROR_ERR_MASK))
return;
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, PRIQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, EVTQ_ABT, active))
ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
if (REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active))
ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
REGV_WR32(VPU_37XX_HOST_MMU_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
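The REG_FLD() and REG_TEST_FLD() helpers used in this file come from ivpu_hw_reg_io.h and rely only on the <REG>_<FIELD>_MASK naming convention, which is why switching from the MTL_VPU_* names to the VPU_37XX_* names is a pure rename. A rough sketch of how such helpers can be built (an assumption, not the driver's actual definitions):

/* Assumed shape of the helpers; the real ones live in ivpu_hw_reg_io.h. */
#define REG_FLD(reg, fld) \
	(reg##_##fld##_MASK)
#define REG_TEST_FLD(reg, fld, val) \
	(!!((val) & REG_FLD(reg, fld)))

/* With these, REG_TEST_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ, active) expands to
 * !!(active & VPU_37XX_HOST_MMU_GERROR_CMDQ_MASK).
 */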


@ -431,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
return ret;
if (!context_id) {
start = vdev->hw->ranges.global_low.start;
end = vdev->hw->ranges.global_high.end;
start = vdev->hw->ranges.global.start;
end = vdev->hw->ranges.shave.end;
} else {
start = vdev->hw->ranges.user_low.start;
end = vdev->hw->ranges.user_high.end;
start = vdev->hw->ranges.user.start;
end = vdev->hw->ranges.dma.end;
}
drm_mm_init(&ctx->mm, start, end - start);


@ -135,7 +135,6 @@ config DRM_DEBUG_MODESET_LOCK
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
select FRAMEBUFFER_CONSOLE if !EXPERT
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y
help
@ -196,6 +195,21 @@ config DRM_TTM
GPU memory types. Will be enabled automatically if a device driver
uses it.
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
depends on DRM && KUNIT
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
developers.
If in doubt, say "N".
config DRM_EXEC
tristate
depends on DRM


@ -68,7 +68,11 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
struct drm_display_mode *adj = &c->state->adjusted_mode;
struct drm_encoder *encoder = NULL, *en_iter;
struct drm_connector *connector = NULL;
struct atmel_hlcdc_crtc_state *state;
struct drm_device *ddev = c->dev;
struct drm_connector_list_iter iter;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
@ -76,6 +80,23 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
unsigned int cfg = 0;
int div, ret;
/* get encoder from crtc */
drm_for_each_encoder(en_iter, ddev) {
if (en_iter->crtc == c) {
encoder = en_iter;
break;
}
}
if (encoder) {
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter)
if (connector->encoder == encoder)
break;
drm_connector_list_iter_end(&iter);
}
ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
if (ret)
return;
@ -134,6 +155,10 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg |= ATMEL_HLCDC_CLKDIV(div);
if (connector &&
connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
cfg |= ATMEL_HLCDC_CLKPOL;
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);
state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);


@ -2449,15 +2449,7 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi)
enum drm_connector_status result;
result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
mutex_lock(&hdmi->mutex);
if (result != hdmi->last_connector_result) {
dev_dbg(hdmi->dev, "read_hpd result: %d", result);
handle_plugged_change(hdmi,
result == connector_status_connected);
hdmi->last_connector_result = result;
}
mutex_unlock(&hdmi->mutex);
hdmi->last_connector_result = result;
return result;
}
@ -2958,6 +2950,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
hdmi->curr_conn = NULL;
dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi);
handle_plugged_change(hdmi, false);
mutex_unlock(&hdmi->mutex);
}
@ -2976,6 +2969,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
hdmi->curr_conn = connector;
dw_hdmi_update_power(hdmi);
dw_hdmi_update_phy_mask(hdmi);
handle_plugged_change(hdmi, true);
mutex_unlock(&hdmi->mutex);
}


@ -496,6 +496,8 @@ struct lsdc_bo *lsdc_bo_create_kernel_pinned(struct drm_device *ddev,
int ret;
lbo = lsdc_bo_create(ddev, domain, size, true, NULL, NULL);
if (IS_ERR(lbo))
return ERR_CAST(lbo);
ret = lsdc_bo_reserve(lbo);
if (unlikely(ret)) {


@ -1226,7 +1226,7 @@ static const struct of_device_id mcde_dsi_of_match[] = {
struct platform_driver mcde_dsi_driver = {
.driver = {
.name = "mcde-dsi",
.of_match_table = of_match_ptr(mcde_dsi_of_match),
.of_match_table = mcde_dsi_of_match,
},
.probe = mcde_dsi_probe,
.remove_new = mcde_dsi_remove,


@ -47,6 +47,9 @@ nouveau-y += nouveau_prime.o
nouveau-y += nouveau_sgdma.o
nouveau-y += nouveau_ttm.o
nouveau-y += nouveau_vmm.o
nouveau-y += nouveau_exec.o
nouveau-y += nouveau_sched.o
nouveau-y += nouveau_uvmm.o
# DRM - modesetting
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o


@ -10,6 +10,8 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select DRM_EXEC
select DRM_SCHED
select I2C
select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT


@ -1122,11 +1122,18 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
ret = nouveau_fence_new(chan, false, pfence);
ret = nouveau_fence_new(pfence);
if (ret)
goto fail;
ret = nouveau_fence_emit(*pfence, chan);
if (ret)
goto fail_fence_unref;
return 0;
fail_fence_unref:
nouveau_fence_unref(pfence);
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);


@ -3,7 +3,10 @@
struct nvif_vmm_v0 {
__u8 version;
__u8 page_nr;
__u8 managed;
#define NVIF_VMM_V0_TYPE_UNMANAGED 0x00
#define NVIF_VMM_V0_TYPE_MANAGED 0x01
#define NVIF_VMM_V0_TYPE_RAW 0x02
__u8 type;
__u8 pad03[5];
__u64 addr;
__u64 size;
@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP 0x04
#define NVIF_VMM_V0_PFNMAP 0x05
#define NVIF_VMM_V0_PFNCLR 0x06
#define NVIF_VMM_V0_RAW 0x07
#define NVIF_VMM_V0_MTHD(i) ((i) + 0x80)
struct nvif_vmm_page_v0 {
@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
__u64 addr;
};
struct nvif_vmm_raw_v0 {
__u8 version;
#define NVIF_VMM_RAW_V0_GET 0x0
#define NVIF_VMM_RAW_V0_PUT 0x1
#define NVIF_VMM_RAW_V0_MAP 0x2
#define NVIF_VMM_RAW_V0_UNMAP 0x3
#define NVIF_VMM_RAW_V0_SPARSE 0x4
__u8 op;
__u8 sparse;
__u8 ref;
__u8 shift;
__u32 argc;
__u8 pad01[7];
__u64 addr;
__u64 size;
__u64 offset;
__u64 memory;
__u64 argv;
};
struct nvif_vmm_pfnmap_v0 {
__u8 version;
__u8 page;


@ -4,6 +4,12 @@
struct nvif_mem;
struct nvif_mmu;
enum nvif_vmm_type {
UNMANAGED,
MANAGED,
RAW,
};
enum nvif_vmm_get {
ADDR,
PTES,
@ -30,8 +36,9 @@ struct nvif_vmm {
int page_nr;
};
int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
u8 page, u8 align, u64 size, struct nvif_vma *);
@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);
int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift, bool sparse);
int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif
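The raw flavour splits page-table reservation, mapping and sparse handling into explicit calls, which the new GPUVA-based uvmm code in nouveau builds on. A hypothetical call sequence using only the prototypes above might look like the sketch below; the oclass, addresses, size and page shift are placeholders and the map arguments are left empty for brevity.

#include <linux/sizes.h>

/* Hypothetical sketch: reserve page tables for a range, map a memory object
 * into it, then tear everything down again. All concrete values are made up;
 * real callers are nouveau's uvmm VM_BIND paths.
 */
static int example_raw_bind(struct nvif_mmu *mmu, s32 oclass, struct nvif_mem *mem)
{
	struct nvif_vmm vmm;
	const u64 addr = SZ_4G, size = SZ_2M;
	const u8 shift = 16;	/* assumed 64 KiB GPU pages */
	int ret;

	ret = nvif_vmm_ctor(mmu, "exampleRawVmm", oclass, RAW, 0, 0, NULL, 0, &vmm);
	if (ret)
		return ret;

	ret = nvif_vmm_raw_get(&vmm, addr, size, shift);	/* reserve PTs */
	if (ret)
		goto done;

	ret = nvif_vmm_raw_map(&vmm, addr, size, shift, NULL, 0, mem, 0);
	if (!ret)
		nvif_vmm_raw_unmap(&vmm, addr, size, shift, false);

	nvif_vmm_raw_put(&vmm, addr, size, shift);
done:
	nvif_vmm_dtor(&vmm);
	return ret;
}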


@ -17,6 +17,7 @@ struct nvkm_vma {
bool part:1; /* Region was split from an allocated region by map(). */
bool busy:1; /* Region busy (for temporarily preventing user access). */
bool mapped:1; /* Region contains valid pages. */
bool no_comp:1; /* Force no memory compression. */
struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
struct nvkm_tags *tags; /* Compression tag reference. */
};
@ -27,10 +28,26 @@ struct nvkm_vmm {
const char *name;
u32 debug;
struct kref kref;
struct mutex mutex;
struct {
struct mutex vmm;
struct mutex ref;
struct mutex map;
} mutex;
u64 start;
u64 limit;
struct {
struct {
u64 addr;
u64 size;
} p;
struct {
u64 addr;
u64 size;
} n;
bool raw;
} managed;
struct nvkm_vmm_pt *pd;
struct list_head join;
@ -70,6 +87,7 @@ struct nvkm_vmm_map {
const struct nvkm_vmm_page *page;
bool no_comp;
struct nvkm_tags *tags;
u64 next;
u64 type;


@ -35,6 +35,7 @@
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
@ -125,6 +126,17 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
{
struct nouveau_abi16_ntfy *ntfy, *temp;
/* When a client exits without waiting for its queued up jobs to
* finish it might happen that we fault the channel. This is due to
* drm_file_free() calling drm_gem_release() before the postclose()
* callback. Hence, we can't tear down this scheduler entity before
* uvmm mappings are unmapped. Currently, we can't detect this case.
*
* However, this should be rare and harmless, since the channel isn't
* needed anymore.
*/
nouveau_sched_entity_fini(&chan->sched_entity);
/* wait for all activity to stop before cleaning up */
if (chan->chan)
nouveau_channel_idle(chan->chan);
@ -261,6 +273,13 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV);
/* If uvmm wasn't initialized until now disable it completely to prevent
* userspace from mixing up UAPIs.
*
* The client lock is already acquired by nouveau_abi16_get().
*/
__nouveau_cli_disable_uvmm_noinit(cli);
device = &abi16->device;
engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;
@ -304,6 +323,11 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
drm->sched_wq);
if (ret)
goto done;
init->channel = chan->chan->chid;
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)


@ -26,6 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
struct nouveau_sched_entity sched_entity;
};
struct nouveau_abi16 {
@ -43,28 +44,6 @@ int nouveau_abi16_usif(struct drm_file *, void *data, u32 size);
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
int channel;
uint32_t pushbuf_domains;
/* Notifier memory */
uint32_t notifier_handle;
/* DRM-enforced subchannel assignments */
struct {
uint32_t handle;
uint32_t grclass;
} subchan[8];
uint32_t nr_subchan;
};
struct drm_nouveau_channel_free {
int channel;
};
struct drm_nouveau_grobj_alloc {
int channel;
uint32_t handle;
@ -83,31 +62,12 @@ struct drm_nouveau_gpuobj_free {
uint32_t handle;
};
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
};
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
};
#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)


@ -199,12 +199,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
struct nouveau_bo *
nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags)
u32 tile_mode, u32 tile_flags, bool internal)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
struct nvif_mmu *mmu = &cli->mmu;
struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
int i, pi = -1;
if (!*size) {
@ -215,6 +215,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
INIT_LIST_HEAD(&nvbo->vma_list);
@ -232,69 +233,104 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
nvbo->force_coherent = true;
}
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
} else
if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
nvbo->kind = (tile_flags & 0x00007f00) >> 8;
nvbo->comp = (tile_flags & 0x00030000) >> 16;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
} else {
nvbo->zeta = (tile_flags & 0x00000007);
}
nvbo->mode = tile_mode;
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
if (!nouveau_cli_uvmm(cli) || internal) {
/* for BO noVM allocs, don't assign kinds */
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
nvbo->kind = (tile_flags & 0x00007f00) >> 8;
nvbo->comp = (tile_flags & 0x00030000) >> 16;
if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
} else {
nvbo->zeta = (tile_flags & 0x00000007);
}
nvbo->mode = tile_mode;
/* Select this page size if it's the first that supports
* the potential memory domains, or when it's compatible
* with the requested compression settings.
*/
if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
pi = i;
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
(domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
/* Select this page size if it's the first that supports
* the potential memory domains, or when it's compatible
* with the requested compression settings.
*/
if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
nvbo->kind = mmu->kind[nvbo->kind];
nvbo->comp = 0;
}
nvbo->page = vmm->page[pi].shift;
} else {
/* reject other tile flags when in VM mode. */
if (tile_mode)
return ERR_PTR(-EINVAL);
if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
return ERR_PTR(-EINVAL);
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
* during buffer migration, we need to determine page
* size for the buffer up-front, and pre-allocate its
* page tables.
*
* Skip page sizes that can't support needed domains.
*/
if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
if (pi < 0)
pi = i;
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
nvbo->page = vmm->page[pi].shift;
}
if (WARN_ON(pi < 0)) {
kfree(nvbo);
return ERR_PTR(-EINVAL);
}
/* Disable compression if suitable settings couldn't be found. */
if (nvbo->comp && !vmm->page[pi].comp) {
if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
nvbo->kind = mmu->kind[nvbo->kind];
nvbo->comp = 0;
}
nvbo->page = vmm->page[pi].shift;
nouveau_bo_fixup_align(nvbo, align, size);
return nvbo;
@ -306,18 +342,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
.resv = robj,
};
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
&nvbo->placement, align >> PAGE_SHIFT, false,
ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
&nvbo->placement, align >> PAGE_SHIFT, &ctx,
sg, robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
if (!robj)
ttm_bo_unreserve(&nvbo->bo);
return 0;
}
@ -331,7 +375,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
int ret;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
tile_flags, true);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
@ -339,6 +383,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
dma_resv_init(&nvbo->bo.base._resv);
drm_vma_node_reset(&nvbo->bo.base.vma_node);
/* This must be called before ttm_bo_init_reserved(). Subsequent
* bo_move() callbacks might already iterate the GEMs GPUVA list.
*/
drm_gem_gpuva_init(&nvbo->bo.base);
ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
@ -817,29 +866,39 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
mutex_lock(&cli->mutex);
else
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
/* TODO: figure out a better solution here
*
* wait on the fence here explicitly as going through
* ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
*
* Without this the operation can timeout and we'll fallback to a
* software copy, which might take several minutes to finish.
*/
nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
new_reg);
nouveau_fence_unref(&fence);
}
}
if (ret)
goto out_unlock;
ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
if (ret)
goto out_unlock;
ret = nouveau_fence_new(&fence);
if (ret)
goto out_unlock;
ret = nouveau_fence_emit(fence, chan);
if (ret) {
nouveau_fence_unref(&fence);
goto out_unlock;
}
/* TODO: figure out a better solution here
*
* wait on the fence here explicitly as going through
* ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
*
* Without this the operation can timeout and we'll fallback to a
* software copy, which might take several minutes to finish.
*/
nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
new_reg);
nouveau_fence_unref(&fence);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
}
@ -935,6 +994,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
list_for_each_entry(vma, &nvbo->vma_list, head) {
nouveau_vma_map(vma, mem);
}
nouveau_uvmm_bo_map_all(nvbo, mem);
} else {
list_for_each_entry(vma, &nvbo->vma_list, head) {
ret = dma_resv_wait_timeout(bo->base.resv,
@ -943,6 +1003,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
WARN_ON(ret <= 0);
nouveau_vma_unmap(vma);
}
nouveau_uvmm_bo_unmap_all(nvbo);
}
if (new_reg)


@ -26,6 +26,7 @@ struct nouveau_bo {
struct list_head entry;
int pbbo_index;
bool validate_mapped;
bool no_share;
/* GPU address space is independent of CPU word size */
uint64_t offset;
@ -73,7 +74,7 @@ extern struct ttm_device_funcs nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
u32 domain, u32 tile_mode, u32 tile_flags);
u32 domain, u32 tile_mode, u32 tile_flags, bool internal);
int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj);
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,


@ -40,6 +40,14 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
void
nouveau_channel_kill(struct nouveau_channel *chan)
{
atomic_set(&chan->killed, 1);
if (chan->fence)
nouveau_fence_context_kill(chan->fence, -ENODEV);
}
static int
nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
{
@ -47,9 +55,9 @@ nouveau_channel_killed(struct nvif_event *event, void *repv, u32 repc)
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
atomic_set(&chan->killed, 1);
if (chan->fence)
nouveau_fence_context_kill(chan->fence, -ENODEV);
if (unlikely(!atomic_read(&chan->killed)))
nouveau_channel_kill(chan);
return NVIF_EVENT_DROP;
}
@ -62,9 +70,11 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
ret = nouveau_fence_new(chan, false, &fence);
ret = nouveau_fence_new(&fence);
if (!ret) {
ret = nouveau_fence_wait(fence, false, false);
ret = nouveau_fence_emit(fence, chan);
if (!ret)
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
@ -149,7 +159,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
chan->device = device;
chan->drm = drm;
chan->vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
chan->vmm = nouveau_cli_vmm(cli);
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */


@ -66,6 +66,7 @@ int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *, bool priv,
u32 vram, u32 gart, struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
void nouveau_channel_kill(struct nouveau_channel *);
extern int nouveau_vram_pushbuf;


@ -203,6 +203,44 @@ nouveau_debugfs_pstate_open(struct inode *inode, struct file *file)
return single_open(file, nouveau_debugfs_pstate_get, inode->i_private);
}
static void
nouveau_debugfs_gpuva_regions(struct seq_file *m, struct nouveau_uvmm *uvmm)
{
MA_STATE(mas, &uvmm->region_mt, 0, 0);
struct nouveau_uvma_region *reg;
seq_puts (m, " VA regions | start | range | end \n");
seq_puts (m, "----------------------------------------------------------------------------\n");
mas_for_each(&mas, reg, ULONG_MAX)
seq_printf(m, " | 0x%016llx | 0x%016llx | 0x%016llx\n",
reg->va.addr, reg->va.range, reg->va.addr + reg->va.range);
}
static int
nouveau_debugfs_gpuva(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
struct nouveau_cli *cli;
mutex_lock(&drm->clients_lock);
list_for_each_entry(cli, &drm->clients, head) {
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
if (!uvmm)
continue;
nouveau_uvmm_lock(uvmm);
drm_debugfs_gpuva_info(m, &uvmm->umgr);
seq_puts(m, "\n");
nouveau_debugfs_gpuva_regions(m, uvmm);
nouveau_uvmm_unlock(uvmm);
}
mutex_unlock(&drm->clients_lock);
return 0;
}
static const struct file_operations nouveau_pstate_fops = {
.owner = THIS_MODULE,
.open = nouveau_debugfs_pstate_open,
@ -214,6 +252,7 @@ static const struct file_operations nouveau_pstate_fops = {
static struct drm_info_list nouveau_debugfs_list[] = {
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
DRM_DEBUGFS_GPUVA_INFO(nouveau_debugfs_gpuva, NULL),
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)


@ -209,7 +209,8 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
nouveau_fence_new(dmem->migrate.chan, false, &fence);
if (!nouveau_fence_new(&fence))
nouveau_fence_emit(fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@ -402,7 +403,8 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
if (!nouveau_fence_new(&fence))
nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@ -675,7 +677,8 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
if (!nouveau_fence_new(&fence))
nouveau_fence_emit(fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);


@ -68,6 +68,9 @@
#include "nouveau_platform.h"
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
#include "nouveau_exec.h"
#include "nouveau_uvmm.h"
#include "nouveau_sched.h"
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@ -196,6 +199,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
nouveau_uvmm_fini(&cli->uvmm);
nouveau_sched_entity_fini(&cli->sched_entity);
nouveau_vmm_fini(&cli->svm);
nouveau_vmm_fini(&cli->vmm);
nvif_mmu_dtor(&cli->mmu);
@ -301,6 +306,12 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}
cli->mem = &mems[ret];
ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
drm->sched_wq);
if (ret)
goto done;
return 0;
done:
if (ret)
@ -568,10 +579,14 @@ nouveau_drm_device_init(struct drm_device *dev)
nvif_parent_ctor(&nouveau_parent, &drm->parent);
drm->master.base.object.parent = &drm->parent;
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
ret = nouveau_sched_init(drm);
if (ret)
goto fail_alloc;
ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
if (ret)
goto fail_sched;
ret = nouveau_cli_init(drm, "DRM", &drm->client);
if (ret)
goto fail_master;
@ -628,7 +643,6 @@ nouveau_drm_device_init(struct drm_device *dev)
}
return 0;
fail_dispinit:
nouveau_display_destroy(dev);
fail_dispctor:
@ -641,6 +655,8 @@ nouveau_drm_device_init(struct drm_device *dev)
nouveau_cli_fini(&drm->client);
fail_master:
nouveau_cli_fini(&drm->master);
fail_sched:
nouveau_sched_fini(drm);
fail_alloc:
nvif_parent_dtor(&drm->parent);
kfree(drm);
@ -692,6 +708,8 @@ nouveau_drm_device_fini(struct drm_device *dev)
}
mutex_unlock(&drm->clients_lock);
nouveau_sched_fini(drm);
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
nvif_parent_dtor(&drm->parent);
@ -1193,6 +1211,9 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_VM_INIT, nouveau_uvmm_ioctl_vm_init, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_VM_BIND, nouveau_uvmm_ioctl_vm_bind, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_EXEC, nouveau_exec_ioctl_exec, DRM_RENDER_ALLOW),
};
long
@ -1240,6 +1261,8 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features = DRIVER_GEM |
DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE |
DRIVER_GEM_GPUVA |
DRIVER_MODESET |
DRIVER_RENDER,
.open = nouveau_drm_open,


@ -10,8 +10,8 @@
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 1
#define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 0
/*
* 1.1.1:
@ -63,7 +63,9 @@ struct platform_device;
#include "nouveau_fence.h"
#include "nouveau_bios.h"
#include "nouveau_sched.h"
#include "nouveau_vmm.h"
#include "nouveau_uvmm.h"
struct nouveau_drm_tile {
struct nouveau_fence *fence;
@ -91,6 +93,10 @@ struct nouveau_cli {
struct nvif_mmu mmu;
struct nouveau_vmm vmm;
struct nouveau_vmm svm;
struct nouveau_uvmm uvmm;
struct nouveau_sched_entity sched_entity;
const struct nvif_mclass *mem;
struct list_head head;
@ -112,6 +118,59 @@ struct nouveau_cli_work {
struct dma_fence_cb cb;
};
static inline struct nouveau_uvmm *
nouveau_cli_uvmm(struct nouveau_cli *cli)
{
if (!cli || !cli->uvmm.vmm.cli)
return NULL;
return &cli->uvmm;
}
static inline struct nouveau_uvmm *
nouveau_cli_uvmm_locked(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm;
mutex_lock(&cli->mutex);
uvmm = nouveau_cli_uvmm(cli);
mutex_unlock(&cli->mutex);
return uvmm;
}
static inline struct nouveau_vmm *
nouveau_cli_vmm(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm;
uvmm = nouveau_cli_uvmm(cli);
if (uvmm)
return &uvmm->vmm;
if (cli->svm.cli)
return &cli->svm;
return &cli->vmm;
}
static inline void
__nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
{
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
if (!uvmm)
cli->uvmm.disabled = true;
}
static inline void
nouveau_cli_disable_uvmm_noinit(struct nouveau_cli *cli)
{
mutex_lock(&cli->mutex);
__nouveau_cli_disable_uvmm_noinit(cli);
mutex_unlock(&cli->mutex);
}
void nouveau_cli_work_queue(struct nouveau_cli *, struct dma_fence *,
struct nouveau_cli_work *);
@ -121,6 +180,32 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
static inline void
u_free(void *addr)
{
kvfree(addr);
}
static inline void *
u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
size *= nmemb;
mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
if (copy_from_user(mem, userptr, size)) {
u_free(mem);
return ERR_PTR(-EFAULT);
}
return mem;
}
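
A minimal, hypothetical usage sketch of the u_memcpya()/u_free() pair now shared
via this header (the function name and the 'uptr'/'count' parameters below are
placeholders for illustration only; the real consumers in this series are
nouveau_exec_ucopy() and the pushbuf ioctl):

/* Copy in a user-space array described by a (pointer, count) pair, work on
 * it, then free it -- the pattern the ioctl handlers in this series follow.
 */
static int copy_user_syncs_example(u64 uptr, u32 count)
{
	struct drm_nouveau_sync *syncs;

	syncs = u_memcpya(uptr, count, sizeof(*syncs));
	if (IS_ERR(syncs))
		return PTR_ERR(syncs);

	/* ... validate or translate the entries here ... */

	u_free(syncs);
	return 0;
}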
#include <nvif/object.h>
#include <nvif/parent.h>
@ -222,6 +307,10 @@ struct nouveau_drm {
struct mutex lock;
bool component_registered;
} audio;
struct drm_gpu_scheduler sched;
struct workqueue_struct *sched_wq;
};
static inline struct nouveau_drm *


@ -0,0 +1,411 @@
// SPDX-License-Identifier: MIT
#include <drm/drm_exec.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_chan.h"
#include "nouveau_sched.h"
#include "nouveau_uvmm.h"
/**
* DOC: Overview
*
* Nouveau's VM_BIND / EXEC UAPI consists of three ioctls: DRM_NOUVEAU_VM_INIT,
* DRM_NOUVEAU_VM_BIND and DRM_NOUVEAU_EXEC.
*
* To use the UAPI, a user client must first initialize the VA space using the
* DRM_NOUVEAU_VM_INIT ioctl, specifying which region of the VA space should be
* managed by the kernel and which by the UMD.
*
* The DRM_NOUVEAU_VM_BIND ioctl provides clients an interface to manage the
* userspace-manageable portion of the VA space. It provides operations to map
* and unmap memory. Mappings may be flagged as sparse. Sparse mappings are not
* backed by a GEM object and the kernel will ignore GEM handles provided
* alongside a sparse mapping.
*
* Userspace may request memory backed mappings either within or outside of the
* bounds (but not crossing those bounds) of a previously mapped sparse
* mapping. Subsequently requested memory backed mappings within a sparse
* mapping will take precedence over the corresponding range of the sparse
* mapping. If such memory backed mappings are unmapped, the kernel will make
* sure that the corresponding sparse mapping takes their place again.
* Requests to unmap a sparse mapping that still contains memory backed mappings
* will result in those memory backed mappings being unmapped first.
*
* Unmap requests are not bound to the range of existing mappings and can even
* overlap the bounds of sparse mappings. For such a request the kernel will
* make sure to unmap all memory backed mappings within the given range,
* splitting up memory backed mappings which are only partially contained
* within the given range. Unmap requests with the sparse flag set must match
* the range of a previously mapped sparse mapping exactly though.
*
* While the kernel generally permits arbitrary sequences and ranges of memory
* backed mappings being mapped and unmapped, either within a single or multiple
* VM_BIND ioctl calls, there are some restrictions for sparse mappings.
*
* The kernel does not permit userspace to:
* - unmap non-existent sparse mappings
* - unmap a sparse mapping and map a new sparse mapping overlapping the range
* of the previously unmapped sparse mapping within the same VM_BIND ioctl
* - unmap a sparse mapping and map new memory backed mappings overlapping the
* range of the previously unmapped sparse mapping within the same VM_BIND
* ioctl
*
* When using the VM_BIND ioctl to request the kernel to map memory to a given
* virtual address in the GPU's VA space there is no guarantee that the actual
* mappings are created in the GPU's MMU. If the given memory is swapped out
* at the time the bind operation is executed, the kernel will stash the mapping
* details into its internal allocator and create the actual MMU mappings once
* the memory is swapped back in. While this is transparent for userspace, it is
* guaranteed that all the backing memory is swapped back in and all the memory
* mappings, as requested by userspace previously, are actually mapped once the
* DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
*
* A VM_BIND job can be executed either synchronously or asynchronously. If
* executed asynchronously, userspace may provide a list of syncobjs this job
* will wait for and/or a list of syncobjs the kernel will signal once the
* VM_BIND job finished execution. If executed synchronously the ioctl will
* block until the bind job is finished. For synchronous jobs the kernel will
* not permit any syncobjs to be submitted.
*
* To execute a push buffer the UAPI provides the DRM_NOUVEAU_EXEC ioctl. EXEC
* jobs are always executed asynchronously, and, like VM_BIND jobs, provide
* the option to synchronize them with syncobjs.
*
* Besides that, EXEC jobs can be scheduled for a specified channel to execute on.
*
* Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
* an up-to-date view of the VA space. However, the actual mappings might still
* be pending. Hence, EXEC jobs require the fences of the corresponding VM_BIND
* jobs they depend on to be attached to them.
*/
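
As a hedged illustration of the EXEC side of this UAPI from userspace, the sketch
below submits a single push buffer segment on an existing channel and asks the
kernel to signal a binary syncobj on completion. It relies only on the struct
fields and flags referenced in this patch (drm_nouveau_exec, drm_nouveau_exec_push,
drm_nouveau_sync, DRM_NOUVEAU_SYNC_SYNCOBJ); the header paths, the pre-created
channel and syncobj handles and the GPU virtual address are assumptions made for
the example, not something this series provides.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead(), from libdrm */
#include <drm/nouveau_drm.h>	/* DRM_NOUVEAU_EXEC and the UAPI structs (assumed path) */

static int submit_one_push(int fd, uint32_t chid, uint32_t out_syncobj,
			   uint64_t pushbuf_va, uint32_t pushbuf_len)
{
	struct drm_nouveau_exec_push push = {
		.va = pushbuf_va,	/* GPU VA previously mapped via VM_BIND */
		.va_len = pushbuf_len,
	};
	struct drm_nouveau_sync sig = {
		.flags = DRM_NOUVEAU_SYNC_SYNCOBJ,	/* binary syncobj out-fence */
		.handle = out_syncobj,
	};
	struct drm_nouveau_exec req;

	memset(&req, 0, sizeof(req));
	req.channel = chid;
	req.push_count = 1;
	req.push_ptr = (uintptr_t)&push;
	req.sig_count = 1;
	req.sig_ptr = (uintptr_t)&sig;
	/* wait_count/wait_ptr would name syncobjs to wait on, e.g. the
	 * out-fence of an asynchronous VM_BIND job that mapped pushbuf_va. */

	return drmCommandWriteRead(fd, DRM_NOUVEAU_EXEC, &req, sizeof(req));
}

In a real client the pushbuf_va range would first have been mapped with a
DRM_NOUVEAU_VM_BIND job, and that job's out-fence would be passed here through
wait_ptr to express the dependency described above.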
static int
nouveau_exec_job_submit(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_cli *cli = job->cli;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
struct drm_exec *exec = &job->exec;
struct drm_gem_object *obj;
unsigned long index;
int ret;
ret = nouveau_fence_new(&exec_job->fence);
if (ret)
return ret;
nouveau_uvmm_lock(uvmm);
drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
DRM_EXEC_IGNORE_DUPLICATES);
drm_exec_until_all_locked(exec) {
struct drm_gpuva *va;
drm_gpuva_for_each_va(va, &uvmm->umgr) {
if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
continue;
ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
drm_exec_retry_on_contention(exec);
if (ret)
goto err_uvmm_unlock;
}
}
nouveau_uvmm_unlock(uvmm);
drm_exec_for_each_locked_object(exec, index, obj) {
struct nouveau_bo *nvbo = nouveau_gem_object(obj);
ret = nouveau_bo_validate(nvbo, true, false);
if (ret)
goto err_exec_fini;
}
return 0;
err_uvmm_unlock:
nouveau_uvmm_unlock(uvmm);
err_exec_fini:
drm_exec_fini(exec);
return ret;
}
static void
nouveau_exec_job_armed_submit(struct nouveau_job *job)
{
struct drm_exec *exec = &job->exec;
struct drm_gem_object *obj;
unsigned long index;
drm_exec_for_each_locked_object(exec, index, obj)
dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
drm_exec_fini(exec);
}
static struct dma_fence *
nouveau_exec_job_run(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_channel *chan = exec_job->chan;
struct nouveau_fence *fence = exec_job->fence;
int i, ret;
ret = nouveau_dma_wait(chan, exec_job->push.count + 1, 16);
if (ret) {
NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
return ERR_PTR(ret);
}
for (i = 0; i < exec_job->push.count; i++) {
nv50_dma_push(chan, exec_job->push.s[i].va,
exec_job->push.s[i].va_len);
}
ret = nouveau_fence_emit(fence, chan);
if (ret) {
NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
return ERR_PTR(ret);
}
exec_job->fence = NULL;
return &fence->base;
}
static void
nouveau_exec_job_free(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
nouveau_job_free(job);
nouveau_fence_unref(&exec_job->fence);
kfree(exec_job->push.s);
kfree(exec_job);
}
static enum drm_gpu_sched_stat
nouveau_exec_job_timeout(struct nouveau_job *job)
{
struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
struct nouveau_channel *chan = exec_job->chan;
if (unlikely(!atomic_read(&chan->killed)))
nouveau_channel_kill(chan);
NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n",
chan->chid);
nouveau_sched_entity_fini(job->entity);
return DRM_GPU_SCHED_STAT_ENODEV;
}
static struct nouveau_job_ops nouveau_exec_job_ops = {
.submit = nouveau_exec_job_submit,
.armed_submit = nouveau_exec_job_armed_submit,
.run = nouveau_exec_job_run,
.free = nouveau_exec_job_free,
.timeout = nouveau_exec_job_timeout,
};
int
nouveau_exec_job_init(struct nouveau_exec_job **pjob,
struct nouveau_exec_job_args *__args)
{
struct nouveau_exec_job *job;
struct nouveau_job_args args = {};
int ret;
job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
return -ENOMEM;
job->push.count = __args->push.count;
if (__args->push.count) {
job->push.s = kmemdup(__args->push.s,
sizeof(*__args->push.s) *
__args->push.count,
GFP_KERNEL);
if (!job->push.s) {
ret = -ENOMEM;
goto err_free_job;
}
}
job->chan = __args->chan;
args.sched_entity = __args->sched_entity;
args.file_priv = __args->file_priv;
args.in_sync.count = __args->in_sync.count;
args.in_sync.s = __args->in_sync.s;
args.out_sync.count = __args->out_sync.count;
args.out_sync.s = __args->out_sync.s;
args.ops = &nouveau_exec_job_ops;
args.resv_usage = DMA_RESV_USAGE_WRITE;
ret = nouveau_job_init(&job->base, &args);
if (ret)
goto err_free_pushs;
return 0;
err_free_pushs:
kfree(job->push.s);
err_free_job:
kfree(job);
*pjob = NULL;
return ret;
}
static int
nouveau_exec(struct nouveau_exec_job_args *args)
{
struct nouveau_exec_job *job;
int ret;
ret = nouveau_exec_job_init(&job, args);
if (ret)
return ret;
ret = nouveau_job_submit(&job->base);
if (ret)
goto err_job_fini;
return 0;
err_job_fini:
nouveau_job_fini(&job->base);
return ret;
}
static int
nouveau_exec_ucopy(struct nouveau_exec_job_args *args,
struct drm_nouveau_exec *req)
{
struct drm_nouveau_sync **s;
u32 inc = req->wait_count;
u64 ins = req->wait_ptr;
u32 outc = req->sig_count;
u64 outs = req->sig_ptr;
u32 pushc = req->push_count;
u64 pushs = req->push_ptr;
int ret;
if (pushc) {
args->push.count = pushc;
args->push.s = u_memcpya(pushs, pushc, sizeof(*args->push.s));
if (IS_ERR(args->push.s))
return PTR_ERR(args->push.s);
}
if (inc) {
s = &args->in_sync.s;
args->in_sync.count = inc;
*s = u_memcpya(ins, inc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_pushs;
}
}
if (outc) {
s = &args->out_sync.s;
args->out_sync.count = outc;
*s = u_memcpya(outs, outc, sizeof(**s));
if (IS_ERR(*s)) {
ret = PTR_ERR(*s);
goto err_free_ins;
}
}
return 0;
err_free_pushs:
u_free(args->push.s);
err_free_ins:
u_free(args->in_sync.s);
return ret;
}
static void
nouveau_exec_ufree(struct nouveau_exec_job_args *args)
{
u_free(args->push.s);
u_free(args->in_sync.s);
u_free(args->out_sync.s);
}
int
nouveau_exec_ioctl_exec(struct drm_device *dev,
void *data,
struct drm_file *file_priv)
{
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_abi16_chan *chan16;
struct nouveau_channel *chan = NULL;
struct nouveau_exec_job_args args = {};
struct drm_nouveau_exec *req = data;
int ret = 0;
if (unlikely(!abi16))
return -ENOMEM;
/* abi16 locks already */
if (unlikely(!nouveau_cli_uvmm(cli)))
return nouveau_abi16_put(abi16, -ENOSYS);
list_for_each_entry(chan16, &abi16->channels, head) {
if (chan16->chan->chid == req->channel) {
chan = chan16->chan;
break;
}
}
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
if (unlikely(atomic_read(&chan->killed)))
return nouveau_abi16_put(abi16, -ENODEV);
if (!chan->dma.ib_max)
return nouveau_abi16_put(abi16, -ENOSYS);
if (unlikely(req->push_count > NOUVEAU_GEM_MAX_PUSH)) {
NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->push_count, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
ret = nouveau_exec_ucopy(&args, req);
if (ret)
goto out;
args.sched_entity = &chan16->sched_entity;
args.file_priv = file_priv;
args.chan = chan;
ret = nouveau_exec(&args);
if (ret)
goto out_free_args;
out_free_args:
nouveau_exec_ufree(&args);
out:
return nouveau_abi16_put(abi16, ret);
}


@ -0,0 +1,54 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_EXEC_H__
#define __NOUVEAU_EXEC_H__
#include <drm/drm_exec.h>
#include "nouveau_drv.h"
#include "nouveau_sched.h"
struct nouveau_exec_job_args {
struct drm_file *file_priv;
struct nouveau_sched_entity *sched_entity;
struct drm_exec exec;
struct nouveau_channel *chan;
struct {
struct drm_nouveau_sync *s;
u32 count;
} in_sync;
struct {
struct drm_nouveau_sync *s;
u32 count;
} out_sync;
struct {
struct drm_nouveau_exec_push *s;
u32 count;
} push;
};
struct nouveau_exec_job {
struct nouveau_job base;
struct nouveau_fence *fence;
struct nouveau_channel *chan;
struct {
struct drm_nouveau_exec_push *s;
u32 count;
} push;
};
#define to_nouveau_exec_job(job) \
container_of((job), struct nouveau_exec_job, base)
int nouveau_exec_job_init(struct nouveau_exec_job **job,
struct nouveau_exec_job_args *args);
int nouveau_exec_ioctl_exec(struct drm_device *dev, void *data,
struct drm_file *file_priv);
#endif


@ -96,6 +96,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
if (nouveau_fence_signal(fence))
nvif_event_block(&fctx->event);
}
fctx->killed = 1;
spin_unlock_irqrestore(&fctx->lock, flags);
}
@ -210,6 +211,9 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
if (unlikely(!chan->fence))
return -ENODEV;
fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
@ -226,6 +230,12 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
dma_fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
if (unlikely(fctx->killed)) {
spin_unlock_irq(&fctx->lock);
dma_fence_put(&fence->base);
return -ENODEV;
}
if (nouveau_fence_update(chan, fctx))
nvif_event_block(&fctx->event);
@ -396,25 +406,16 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
}
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
struct nouveau_fence **pfence)
nouveau_fence_new(struct nouveau_fence **pfence)
{
struct nouveau_fence *fence;
int ret = 0;
if (unlikely(!chan->fence))
return -ENODEV;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
ret = nouveau_fence_emit(fence, chan);
if (ret)
nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
return 0;
}
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)


@ -17,8 +17,7 @@ struct nouveau_fence {
unsigned long timeout;
};
int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
struct nouveau_fence **);
int nouveau_fence_new(struct nouveau_fence **);
void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
@ -45,7 +44,7 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_event event;
int notify_ref, dead;
int notify_ref, dead, killed;
};
struct nouveau_fence_priv {


@ -103,13 +103,17 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return 0;
if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
return -EPERM;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
@ -120,7 +124,11 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
ret = nouveau_vma_new(nvbo, vmm, &vma);
/* only create a VMA on binding */
if (!nouveau_cli_uvmm(cli))
ret = nouveau_vma_new(nvbo, vmm, &vma);
else
ret = 0;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
out:
@ -180,13 +188,16 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct device *dev = drm->dev->dev;
struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : & cli->vmm;
struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
int ret;
if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
return;
if (nouveau_cli_uvmm(cli))
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
@ -209,6 +220,7 @@ const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
.free = nouveau_gem_object_del,
.open = nouveau_gem_object_open,
.close = nouveau_gem_object_close,
.export = nouveau_gem_prime_export,
.pin = nouveau_gem_prime_pin,
.unpin = nouveau_gem_prime_unpin,
.get_sg_table = nouveau_gem_prime_get_sg_table,
@ -224,18 +236,28 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
struct nouveau_bo **pnvbo)
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
struct dma_resv *resv = NULL;
struct nouveau_bo *nvbo;
int ret;
if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
if (unlikely(!uvmm))
return -EINVAL;
resv = &uvmm->resv;
}
if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
domain |= NOUVEAU_GEM_DOMAIN_CPU;
nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
tile_flags, false);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
@ -246,7 +268,14 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
return ret;
}
ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
if (resv)
dma_resv_lock(resv, NULL);
ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);
if (resv)
dma_resv_unlock(resv);
if (ret)
return ret;
@ -269,7 +298,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
struct nouveau_vma *vma;
if (is_power_of_2(nvbo->valid_domains))
@ -279,13 +308,15 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->offset;
if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
!nouveau_cli_uvmm(cli)) {
vma = nouveau_vma_find(nvbo, vmm);
if (!vma)
return -EINVAL;
rep->offset = vma->addr;
}
} else
rep->offset = 0;
rep->size = nvbo->bo.base.size;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
@ -310,6 +341,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
/* If uvmm wasn't initialized until now, disable it completely to prevent
* userspace from mixing up UAPIs.
*/
nouveau_cli_disable_uvmm_noinit(cli);
ret = nouveau_gem_new(cli, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
@ -613,32 +649,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
}
static inline void
u_free(void *addr)
{
kvfree(addr);
}
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
size *= nmemb;
mem = kvmalloc(size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
if (copy_from_user(mem, userptr, size)) {
u_free(mem);
return ERR_PTR(-EFAULT);
}
return mem;
}
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf *req,
@ -747,6 +757,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (unlikely(!abi16))
return -ENOMEM;
if (unlikely(nouveau_cli_uvmm(cli)))
return -ENOSYS;
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
chan = temp->chan;
@ -899,8 +912,11 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
}
ret = nouveau_fence_new(chan, false, &fence);
ret = nouveau_fence_new(&fence);
if (!ret)
ret = nouveau_fence_emit(fence, chan);
if (ret) {
nouveau_fence_unref(&fence);
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;


@ -37,5 +37,6 @@ extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
struct drm_device *, struct dma_buf_attachment *, struct sg_table *);
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
int flags);
#endif


@ -35,4 +35,9 @@ int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
int
nouveau_mem_map_fixed(struct nouveau_mem *mem,
struct nvif_vmm *vmm,
u8 kind, u64 addr,
u64 offset, u64 range);
#endif


@ -50,7 +50,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
dma_resv_lock(robj, NULL);
nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
NOUVEAU_GEM_DOMAIN_GART, 0, 0);
NOUVEAU_GEM_DOMAIN_GART, 0, 0, true);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
@ -102,3 +102,14 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
nouveau_bo_unpin(nvbo);
}
struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
if (nvbo->no_share)
return ERR_PTR(-EPERM);
return drm_gem_prime_export(gobj, flags);
}


@ -0,0 +1,419 @@
// SPDX-License-Identifier: MIT
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_syncobj.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_dma.h"
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
/* FIXME
*
* We want to make sure that jobs currently executing can't be deferred by
* other jobs competing for the hardware. Otherwise we might end up with job
* timeouts just because of too many clients submitting too many jobs. We don't
* want jobs to time out because of system load, but because of the job being
* too bulky.
*
* For now allow for up to 16 concurrent jobs in flight until we know how many
* rings the hardware can process in parallel.
*/
#define NOUVEAU_SCHED_HW_SUBMISSIONS 16
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
int
nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args)
{
struct nouveau_sched_entity *entity = args->sched_entity;
int ret;
job->file_priv = args->file_priv;
job->cli = nouveau_cli(args->file_priv);
job->entity = entity;
job->sync = args->sync;
job->resv_usage = args->resv_usage;
job->ops = args->ops;
job->in_sync.count = args->in_sync.count;
if (job->in_sync.count) {
if (job->sync)
return -EINVAL;
job->in_sync.data = kmemdup(args->in_sync.s,
sizeof(*args->in_sync.s) *
args->in_sync.count,
GFP_KERNEL);
if (!job->in_sync.data)
return -ENOMEM;
}
job->out_sync.count = args->out_sync.count;
if (job->out_sync.count) {
if (job->sync) {
ret = -EINVAL;
goto err_free_in_sync;
}
job->out_sync.data = kmemdup(args->out_sync.s,
sizeof(*args->out_sync.s) *
args->out_sync.count,
GFP_KERNEL);
if (!job->out_sync.data) {
ret = -ENOMEM;
goto err_free_in_sync;
}
job->out_sync.objs = kcalloc(job->out_sync.count,
sizeof(*job->out_sync.objs),
GFP_KERNEL);
if (!job->out_sync.objs) {
ret = -ENOMEM;
goto err_free_out_sync;
}
job->out_sync.chains = kcalloc(job->out_sync.count,
sizeof(*job->out_sync.chains),
GFP_KERNEL);
if (!job->out_sync.chains) {
ret = -ENOMEM;
goto err_free_objs;
}
}
ret = drm_sched_job_init(&job->base, &entity->base, NULL);
if (ret)
goto err_free_chains;
job->state = NOUVEAU_JOB_INITIALIZED;
return 0;
err_free_chains:
kfree(job->out_sync.chains);
err_free_objs:
kfree(job->out_sync.objs);
err_free_out_sync:
kfree(job->out_sync.data);
err_free_in_sync:
kfree(job->in_sync.data);
return ret;
}
void
nouveau_job_free(struct nouveau_job *job)
{
kfree(job->in_sync.data);
kfree(job->out_sync.data);
kfree(job->out_sync.objs);
kfree(job->out_sync.chains);
}
void nouveau_job_fini(struct nouveau_job *job)
{
dma_fence_put(job->done_fence);
drm_sched_job_cleanup(&job->base);
job->ops->free(job);
}
static int
sync_find_fence(struct nouveau_job *job,
struct drm_nouveau_sync *sync,
struct dma_fence **fence)
{
u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
u64 point = 0;
int ret;
if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
return -EOPNOTSUPP;
if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
point = sync->timeline_value;
ret = drm_syncobj_find_fence(job->file_priv,
sync->handle, point,
0 /* flags */, fence);
if (ret)
return ret;
return 0;
}
static int
nouveau_job_add_deps(struct nouveau_job *job)
{
struct dma_fence *in_fence = NULL;
int ret, i;
for (i = 0; i < job->in_sync.count; i++) {
struct drm_nouveau_sync *sync = &job->in_sync.data[i];
ret = sync_find_fence(job, sync, &in_fence);
if (ret) {
NV_PRINTK(warn, job->cli,
"Failed to find syncobj (-> in): handle=%d\n",
sync->handle);
return ret;
}
ret = drm_sched_job_add_dependency(&job->base, in_fence);
if (ret)
return ret;
}
return 0;
}
static void
nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
{
int i;
for (i = 0; i < job->out_sync.count; i++) {
struct drm_syncobj *obj = job->out_sync.objs[i];
struct dma_fence_chain *chain = job->out_sync.chains[i];
if (obj)
drm_syncobj_put(obj);
if (chain)
dma_fence_chain_free(chain);
}
}
static int
nouveau_job_fence_attach_prepare(struct nouveau_job *job)
{
int i, ret;
for (i = 0; i < job->out_sync.count; i++) {
struct drm_nouveau_sync *sync = &job->out_sync.data[i];
struct drm_syncobj **pobj = &job->out_sync.objs[i];
struct dma_fence_chain **pchain = &job->out_sync.chains[i];
u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
if (stype != DRM_NOUVEAU_SYNC_SYNCOBJ &&
stype != DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
ret = -EINVAL;
goto err_sync_cleanup;
}
*pobj = drm_syncobj_find(job->file_priv, sync->handle);
if (!*pobj) {
NV_PRINTK(warn, job->cli,
"Failed to find syncobj (-> out): handle=%d\n",
sync->handle);
ret = -ENOENT;
goto err_sync_cleanup;
}
if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
*pchain = dma_fence_chain_alloc();
if (!*pchain) {
ret = -ENOMEM;
goto err_sync_cleanup;
}
}
}
return 0;
err_sync_cleanup:
nouveau_job_fence_attach_cleanup(job);
return ret;
}
static void
nouveau_job_fence_attach(struct nouveau_job *job)
{
struct dma_fence *fence = job->done_fence;
int i;
for (i = 0; i < job->out_sync.count; i++) {
struct drm_nouveau_sync *sync = &job->out_sync.data[i];
struct drm_syncobj **pobj = &job->out_sync.objs[i];
struct dma_fence_chain **pchain = &job->out_sync.chains[i];
u32 stype = sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK;
if (stype == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
drm_syncobj_add_point(*pobj, *pchain, fence,
sync->timeline_value);
} else {
drm_syncobj_replace_fence(*pobj, fence);
}
drm_syncobj_put(*pobj);
*pobj = NULL;
*pchain = NULL;
}
}
int
nouveau_job_submit(struct nouveau_job *job)
{
struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
struct dma_fence *done_fence = NULL;
int ret;
ret = nouveau_job_add_deps(job);
if (ret)
goto err;
ret = nouveau_job_fence_attach_prepare(job);
if (ret)
goto err;
/* Make sure the job appears on the sched_entity's queue in the same
* order as it was submitted.
*/
mutex_lock(&entity->mutex);
/* Guarantee we won't fail after the submit() callback returned
* successfully.
*/
if (job->ops->submit) {
ret = job->ops->submit(job);
if (ret)
goto err_cleanup;
}
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
if (job->sync)
done_fence = dma_fence_get(job->done_fence);
if (job->ops->armed_submit)
job->ops->armed_submit(job);
nouveau_job_fence_attach(job);
/* Set job state before pushing the job to the scheduler,
* such that we do not overwrite the job state set in run().
*/
job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;
drm_sched_entity_push_job(&job->base);
mutex_unlock(&entity->mutex);
if (done_fence) {
dma_fence_wait(done_fence, true);
dma_fence_put(done_fence);
}
return 0;
err_cleanup:
mutex_unlock(&entity->mutex);
nouveau_job_fence_attach_cleanup(job);
err:
job->state = NOUVEAU_JOB_SUBMIT_FAILED;
return ret;
}
bool
nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
struct work_struct *work)
{
return queue_work(entity->sched_wq, work);
}
static struct dma_fence *
nouveau_job_run(struct nouveau_job *job)
{
struct dma_fence *fence;
fence = job->ops->run(job);
if (IS_ERR(fence))
job->state = NOUVEAU_JOB_RUN_FAILED;
else
job->state = NOUVEAU_JOB_RUN_SUCCESS;
return fence;
}
static struct dma_fence *
nouveau_sched_run_job(struct drm_sched_job *sched_job)
{
struct nouveau_job *job = to_nouveau_job(sched_job);
return nouveau_job_run(job);
}
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct nouveau_job *job = to_nouveau_job(sched_job);
NV_PRINTK(warn, job->cli, "Job timed out.\n");
if (job->ops->timeout)
return job->ops->timeout(job);
return DRM_GPU_SCHED_STAT_ENODEV;
}
static void
nouveau_sched_free_job(struct drm_sched_job *sched_job)
{
struct nouveau_job *job = to_nouveau_job(sched_job);
nouveau_job_fini(job);
}
int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
struct drm_gpu_scheduler *sched,
struct workqueue_struct *sched_wq)
{
mutex_init(&entity->mutex);
spin_lock_init(&entity->job.list.lock);
INIT_LIST_HEAD(&entity->job.list.head);
init_waitqueue_head(&entity->job.wq);
entity->sched_wq = sched_wq;
return drm_sched_entity_init(&entity->base,
DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
}
void
nouveau_sched_entity_fini(struct nouveau_sched_entity *entity)
{
drm_sched_entity_destroy(&entity->base);
}
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
};
int nouveau_sched_init(struct nouveau_drm *drm)
{
struct drm_gpu_scheduler *sched = &drm->sched;
long job_hang_limit = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS);
drm->sched_wq = create_singlethread_workqueue("nouveau_sched_wq");
if (!drm->sched_wq)
return -ENOMEM;
return drm_sched_init(sched, &nouveau_sched_ops,
NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
NULL, NULL, "nouveau_sched", drm->dev->dev);
}
void nouveau_sched_fini(struct nouveau_drm *drm)
{
destroy_workqueue(drm->sched_wq);
drm_sched_fini(&drm->sched);
}


@ -0,0 +1,127 @@
/* SPDX-License-Identifier: MIT */
#ifndef NOUVEAU_SCHED_H
#define NOUVEAU_SCHED_H
#include <linux/types.h>
#include <drm/drm_exec.h>
#include <drm/gpu_scheduler.h>
#include "nouveau_drv.h"
#define to_nouveau_job(sched_job) \
container_of((sched_job), struct nouveau_job, base)
struct nouveau_job_ops;
enum nouveau_job_state {
NOUVEAU_JOB_UNINITIALIZED = 0,
NOUVEAU_JOB_INITIALIZED,
NOUVEAU_JOB_SUBMIT_SUCCESS,
NOUVEAU_JOB_SUBMIT_FAILED,
NOUVEAU_JOB_RUN_SUCCESS,
NOUVEAU_JOB_RUN_FAILED,
};
struct nouveau_job_args {
struct drm_file *file_priv;
struct nouveau_sched_entity *sched_entity;
enum dma_resv_usage resv_usage;
bool sync;
struct {
struct drm_nouveau_sync *s;
u32 count;
} in_sync;
struct {
struct drm_nouveau_sync *s;
u32 count;
} out_sync;
struct nouveau_job_ops *ops;
};
struct nouveau_job {
struct drm_sched_job base;
enum nouveau_job_state state;
struct nouveau_sched_entity *entity;
struct drm_file *file_priv;
struct nouveau_cli *cli;
struct drm_exec exec;
enum dma_resv_usage resv_usage;
struct dma_fence *done_fence;
bool sync;
struct {
struct drm_nouveau_sync *data;
u32 count;
} in_sync;
struct {
struct drm_nouveau_sync *data;
struct drm_syncobj **objs;
struct dma_fence_chain **chains;
u32 count;
} out_sync;
struct nouveau_job_ops {
/* If .submit() returns without any error, it is guaranteed that
* armed_submit() is called.
*/
int (*submit)(struct nouveau_job *);
void (*armed_submit)(struct nouveau_job *);
struct dma_fence *(*run)(struct nouveau_job *);
void (*free)(struct nouveau_job *);
enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
} *ops;
};
int nouveau_job_ucopy_syncs(struct nouveau_job_args *args,
u32 inc, u64 ins,
u32 outc, u64 outs);
int nouveau_job_init(struct nouveau_job *job,
struct nouveau_job_args *args);
void nouveau_job_free(struct nouveau_job *job);
int nouveau_job_submit(struct nouveau_job *job);
void nouveau_job_fini(struct nouveau_job *job);
#define to_nouveau_sched_entity(entity) \
container_of((entity), struct nouveau_sched_entity, base)
struct nouveau_sched_entity {
struct drm_sched_entity base;
struct mutex mutex;
struct workqueue_struct *sched_wq;
struct {
struct {
struct list_head head;
spinlock_t lock;
} list;
struct wait_queue_head wq;
} job;
};
int nouveau_sched_entity_init(struct nouveau_sched_entity *entity,
struct drm_gpu_scheduler *sched,
struct workqueue_struct *sched_wq);
void nouveau_sched_entity_fini(struct nouveau_sched_entity *entity);
bool nouveau_sched_entity_qwork(struct nouveau_sched_entity *entity,
struct work_struct *work);
int nouveau_sched_init(struct nouveau_drm *drm);
void nouveau_sched_fini(struct nouveau_drm *drm);
#endif


@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
* VMM instead of the standard one.
*/
ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
cli->vmm.vmm.object.oclass, true,
cli->vmm.vmm.object.oclass, MANAGED,
args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) {
.fault_replay = true,

File diff suppressed because it is too large.


@ -0,0 +1,108 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_UVMM_H__
#define __NOUVEAU_UVMM_H__
#include <drm/drm_gpuva_mgr.h>
#include "nouveau_drv.h"
struct nouveau_uvmm {
struct nouveau_vmm vmm;
struct drm_gpuva_manager umgr;
struct maple_tree region_mt;
struct mutex mutex;
struct dma_resv resv;
u64 kernel_managed_addr;
u64 kernel_managed_size;
bool disabled;
};
struct nouveau_uvma_region {
struct nouveau_uvmm *uvmm;
struct {
u64 addr;
u64 range;
} va;
struct kref kref;
struct completion complete;
bool dirty;
};
struct nouveau_uvma {
struct drm_gpuva va;
struct nouveau_uvma_region *region;
u8 kind;
};
#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
struct nouveau_uvmm_bind_job {
struct nouveau_job base;
struct kref kref;
struct list_head entry;
struct work_struct work;
struct completion complete;
/* struct bind_job_op */
struct list_head ops;
};
struct nouveau_uvmm_bind_job_args {
struct drm_file *file_priv;
struct nouveau_sched_entity *sched_entity;
unsigned int flags;
struct {
struct drm_nouveau_sync *s;
u32 count;
} in_sync;
struct {
struct drm_nouveau_sync *s;
u32 count;
} out_sync;
struct {
struct drm_nouveau_vm_bind_op *s;
u32 count;
} op;
};
#define to_uvmm_bind_job(job) container_of((job), struct nouveau_uvmm_bind_job, base)
int nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
u64 kernel_managed_addr, u64 kernel_managed_size);
void nouveau_uvmm_fini(struct nouveau_uvmm *uvmm);
void nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbov, struct nouveau_mem *mem);
void nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo);
int nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, void *data,
struct drm_file *file_priv);
static inline void nouveau_uvmm_lock(struct nouveau_uvmm *uvmm)
{
mutex_lock(&uvmm->mutex);
}
static inline void nouveau_uvmm_unlock(struct nouveau_uvmm *uvmm)
{
mutex_unlock(&uvmm->mutex);
}
#endif


@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
0, NULL, 0, &vmm->vmm);
int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
if (ret)
return ret;


@ -104,6 +104,90 @@ nvif_vmm_get(struct nvif_vmm *vmm, enum nvif_vmm_get type, bool sparse,
return ret;
}
int
nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_GET,
.addr = addr,
.size = size,
.shift = shift,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_PUT,
.addr = addr,
.size = size,
.shift = shift,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
void *argv, u32 argc, struct nvif_mem *mem, u64 offset)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_MAP,
.addr = addr,
.size = size,
.shift = shift,
.memory = nvif_handle(&mem->object),
.offset = offset,
.argv = (u64)(uintptr_t)argv,
.argc = argc,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
u8 shift, bool sparse)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_UNMAP,
.addr = addr,
.size = size,
.shift = shift,
.sparse = sparse,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
int
nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref)
{
struct nvif_vmm_raw_v0 args = {
.version = 0,
.op = NVIF_VMM_RAW_V0_SPARSE,
.addr = addr,
.size = size,
.ref = ref,
};
return nvif_object_mthd(&vmm->object, NVIF_VMM_V0_RAW,
&args, sizeof(args));
}
void
nvif_vmm_dtor(struct nvif_vmm *vmm)
{
@ -112,8 +196,9 @@ nvif_vmm_dtor(struct nvif_vmm *vmm)
}
int
nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *vmm)
nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
enum nvif_vmm_type type, u64 addr, u64 size, void *argv, u32 argc,
struct nvif_vmm *vmm)
{
struct nvif_vmm_v0 *args;
u32 argn = sizeof(*args) + argc;
@ -125,9 +210,18 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, bool managed,
if (!(args = kmalloc(argn, GFP_KERNEL)))
return -ENOMEM;
args->version = 0;
args->managed = managed;
args->addr = addr;
args->size = size;
switch (type) {
case UNMANAGED: args->type = NVIF_VMM_V0_TYPE_UNMANAGED; break;
case MANAGED: args->type = NVIF_VMM_V0_TYPE_MANAGED; break;
case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
default:
WARN_ON(1);
return -EINVAL;
}
memcpy(args->data, argv, argc);
ret = nvif_object_ctor(&mmu->object, name ? name : "nvifVmm", 0,


@ -58,10 +58,13 @@ nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
if (size) {
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
}
return ret;
@ -88,10 +91,13 @@ nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
if (size) {
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
}
return ret;
@ -113,7 +119,10 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
mutex_lock(&vmm->mutex);
if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
return -EINVAL;
mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, addr);
if (ret = -ENOENT, !vma || vma->addr != addr) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx",
@ -134,7 +143,7 @@ nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_unmap_locked(vmm, vma, false);
ret = 0;
done:
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@ -159,13 +168,16 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
return -EINVAL;
memory = nvkm_umem_search(client, handle);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
VMM_DEBUG(vmm, "lookup %016llx", addr);
goto fail;
@ -198,7 +210,7 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
}
}
vma->busy = true;
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
if (ret == 0) {
@ -207,11 +219,11 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
vma->busy = false;
nvkm_vmm_unmap_region(vmm, vma);
fail:
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
nvkm_memory_unref(&memory);
return ret;
}
@ -232,7 +244,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
vma = nvkm_vmm_node_search(vmm, args->v0.addr);
if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
@ -248,7 +260,7 @@ nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
nvkm_vmm_put_locked(vmm, vma);
ret = 0;
done:
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@ -275,10 +287,10 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
page, align, size, &vma);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
if (ret)
return ret;
@ -314,6 +326,168 @@ nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
return 0;
}
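/* Translate a page shift into an index into the VMM's page array, rejecting
 * requests whose size is not aligned to the selected page size.
 */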
static inline int
nvkm_uvmm_page_index(struct nvkm_uvmm *uvmm, u64 size, u8 shift, u8 *refd)
{
struct nvkm_vmm *vmm = uvmm->vmm;
const struct nvkm_vmm_page *page;
if (likely(shift)) {
for (page = vmm->func->page; page->shift; page++) {
if (shift == page->shift)
break;
}
if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
VMM_DEBUG(vmm, "page %d %016llx", shift, size);
return -EINVAL;
}
} else {
return -EINVAL;
}
*refd = page - vmm->func->page;
return 0;
}
static int
nvkm_uvmm_mthd_raw_get(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd);
}
static int
nvkm_uvmm_mthd_raw_put(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
nvkm_vmm_raw_put(vmm, args->addr, args->size, refd);
return 0;
}
static int
nvkm_uvmm_mthd_raw_map(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_client *client = uvmm->object.client;
struct nvkm_vmm *vmm = uvmm->vmm;
struct nvkm_vma vma = {
.addr = args->addr,
.size = args->size,
.used = true,
.mapref = false,
.no_comp = true,
};
struct nvkm_memory *memory;
void *argv = (void *)(uintptr_t)args->argv;
unsigned int argc = args->argc;
u64 handle = args->memory;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
vma.page = vma.refd = refd;
memory = nvkm_umem_search(client, args->memory);
if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc);
nvkm_memory_unref(&vma.memory);
nvkm_memory_unref(&memory);
return ret;
}
static int
nvkm_uvmm_mthd_raw_unmap(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
u8 refd;
int ret;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
ret = nvkm_uvmm_page_index(uvmm, args->size, args->shift, &refd);
if (ret)
return ret;
nvkm_vmm_raw_unmap(vmm, args->addr, args->size,
args->sparse, refd);
return 0;
}
static int
nvkm_uvmm_mthd_raw_sparse(struct nvkm_uvmm *uvmm, struct nvif_vmm_raw_v0 *args)
{
struct nvkm_vmm *vmm = uvmm->vmm;
if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size))
return -EINVAL;
return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref);
}
static int
nvkm_uvmm_mthd_raw(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
union {
struct nvif_vmm_raw_v0 v0;
} *args = argv;
int ret = -ENOSYS;
if (!uvmm->vmm->managed.raw)
return -EINVAL;
if ((ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true)))
return ret;
switch (args->v0.op) {
case NVIF_VMM_RAW_V0_GET:
return nvkm_uvmm_mthd_raw_get(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_PUT:
return nvkm_uvmm_mthd_raw_put(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_MAP:
return nvkm_uvmm_mthd_raw_map(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_UNMAP:
return nvkm_uvmm_mthd_raw_unmap(uvmm, &args->v0);
case NVIF_VMM_RAW_V0_SPARSE:
return nvkm_uvmm_mthd_raw_sparse(uvmm, &args->v0);
default:
return -EINVAL;
};
}
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
@ -326,6 +500,7 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
case NVIF_VMM_V0_RAW : return nvkm_uvmm_mthd_raw (uvmm, argv, argc);
case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
if (uvmm->vmm->func->mthd) {
return uvmm->vmm->func->mthd(uvmm->vmm,
@ -366,10 +541,11 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
struct nvkm_uvmm *uvmm;
int ret = -ENOSYS;
u64 addr, size;
bool managed;
bool managed, raw;
if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
managed = args->v0.managed != 0;
managed = args->v0.type == NVIF_VMM_V0_TYPE_MANAGED;
raw = args->v0.type == NVIF_VMM_V0_TYPE_RAW;
addr = args->v0.addr;
size = args->v0.size;
} else
@ -377,12 +553,13 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
*pobject = &uvmm->object;
if (!mmu->vmm) {
ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
NULL, "user", &uvmm->vmm);
ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
argv, argc, NULL, "user", &uvmm->vmm);
if (ret)
return ret;
@ -393,6 +570,7 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
}
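/* Remember whether this VMM runs in raw mode: raw VMMs reject the legacy
 * map/unmap/pfn methods inside managed ranges and are driven through the
 * RAW method instead.
 */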
uvmm->vmm->managed.raw = raw;
page = uvmm->vmm->func->page;
args->v0.page_nr = 0;

View file

@ -677,20 +677,94 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
}
static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
mutex_unlock(&vmm->mutex.map);
}
static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
mutex_lock(&vmm->mutex.map);
nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL);
mutex_unlock(&vmm->mutex.map);
}
static void
nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL);
}
static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
mutex_lock(&vmm->mutex.ref);
nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
mutex_unlock(&vmm->mutex.ref);
}
static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
u64 fail;
mutex_lock(&vmm->mutex.ref);
fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) {
if (fail != addr)
nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
mutex_unlock(&vmm->mutex.ref);
return -ENOMEM;
}
mutex_unlock(&vmm->mutex.ref);
return 0;
}
static void
__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
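/* Raw VMMs unmap and drop PTE references in two separate walks, under
 * mutex.map and mutex.ref respectively; managed VMMs keep the combined
 * single-walk path below.
 */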
static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
if (vmm->managed.raw) {
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
nvkm_vmm_ptes_put(vmm, page, addr, size);
} else {
__nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
}
}
static int
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
false, nvkm_vmm_ref_ptes, func, map, NULL);
@ -702,49 +776,27 @@ nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
return 0;
}
static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, bool sparse, bool pfn)
{
const struct nvkm_vmm_desc_func *func = page->desc->func;
nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
NULL, NULL, NULL,
sparse ? func->sparse : func->invalid ? func->invalid :
func->unmap);
}
static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
NULL, func, map, NULL);
}
static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
{
nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
nvkm_vmm_unref_ptes, NULL, NULL, NULL);
}
static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size)
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
u64 addr, u64 size, struct nvkm_vmm_map *map,
nvkm_vmm_pte_func func)
{
u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
nvkm_vmm_ref_ptes, NULL, NULL, NULL);
if (fail != ~0ULL) {
if (fail != addr)
nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
return -ENOMEM;
int ret;
if (vmm->managed.raw) {
ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
if (ret)
return ret;
nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
return 0;
} else {
return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
}
return 0;
}
static inline struct nvkm_vma *
struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
{
struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
vmm->debug = mmu->subdev.debug;
kref_init(&vmm->kref);
__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
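/* The old single lock is split three ways: mutex.vmm covers the VMA tree
 * and overall VMM state, mutex.ref serializes PTE reference counting, and
 * mutex.map serializes PTE map/unmap walks.
 */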
__mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
mutex_init(&vmm->mutex.ref);
mutex_init(&vmm->mutex.map);
/* Locate the smallest page size supported by the backend, it will
* have the deepest nesting of page tables.
@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
return ret;
vmm->managed.p.addr = 0;
vmm->managed.p.size = addr;
/* NVKM-managed area. */
if (size) {
if (!(vma = nvkm_vma_new(addr, size)))
@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
size = vmm->limit - addr;
if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
return ret;
vmm->managed.n.addr = addr;
vmm->managed.n.size = size;
} else {
/* Address-space fully managed by NVKM, requiring calls to
* nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
@ -1362,9 +1422,9 @@ void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
if (vma->memory) {
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_unmap_locked(vmm, vma, false);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
}
}
@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
nvkm_vmm_pte_func func;
int ret;
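/* Propagate the caller's no-compression request so the backend valid()
 * hooks can skip comptag allocation (see the gf100/gp100/nv50 changes
 * further below).
 */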
map->no_comp = vma->no_comp;
/* Make sure we won't overrun the end of the memory object. */
if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
struct nvkm_vmm_map *map)
{
int ret;
mutex_lock(&vmm->mutex);
if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
vmm->managed.raw)
return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
vma->busy = false;
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
{
struct nvkm_vma *vma = *pvma;
if (vma) {
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
nvkm_vmm_put_locked(vmm, vma);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
*pvma = NULL;
}
}
@ -1769,9 +1836,49 @@ int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
{
int ret;
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
return ret;
}
void
nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
bool sparse, u8 refd)
{
const struct nvkm_vmm_page *page = &vmm->func->page[refd];
nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
}
void
nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
const struct nvkm_vmm_page *page = vmm->func->page;
nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
}
int
nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
const struct nvkm_vmm_page *page = vmm->func->page;
if (unlikely(!size))
return -EINVAL;
return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
}
int
nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
{
int ret;
mutex_lock(&vmm->mutex.ref);
ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
mutex_unlock(&vmm->mutex.ref);
return ret;
}
@ -1779,9 +1886,9 @@ void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
if (inst && vmm && vmm->func->part) {
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
}
}
@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
int ret = 0;
if (vmm->func->join) {
mutex_lock(&vmm->mutex);
mutex_lock(&vmm->mutex.vmm);
ret = vmm->func->join(vmm, inst);
mutex_unlock(&vmm->mutex);
mutex_unlock(&vmm->mutex.vmm);
}
return ret;
}

View file

@ -163,6 +163,7 @@ int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, bool managed, u64 addr, u64 size,
struct lock_class_key *, const char *name,
struct nvkm_vmm **);
struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
u64 addr, u64 size);
@ -173,6 +174,30 @@ void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);
int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
bool sparse, u8 refd);
int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);
static inline bool
nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
{
u64 p_start = vmm->managed.p.addr;
u64 p_end = p_start + vmm->managed.p.size;
u64 n_start = vmm->managed.n.addr;
u64 n_end = n_start + vmm->managed.n.size;
u64 end = start + size;
if (start >= p_start && end <= p_end)
return true;
if (start >= n_start && end <= n_end)
return true;
return false;
}
#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
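For orientation, a minimal sketch of how a caller could drive the raw interface declared above, using the same stack-allocated nvkm_vma pattern as nvkm_uvmm_mthd_raw_map(); the function name, parameter list and the offset of 0 are illustrative assumptions, not part of the patch:

static int example_raw_cycle(struct nvkm_vmm *vmm, struct nvkm_memory *memory,
			     void *argv, u32 argc, u64 addr, u64 size, u8 refd)
{
	/* Raw mappings carry their state in a stack vma; no VMA node is
	 * inserted into the tree.
	 */
	struct nvkm_vma vma = {
		.addr = addr,
		.size = size,
		.used = true,
		.mapref = false,
		.no_comp = true,
	};
	int ret;

	if (!nvkm_vmm_in_managed_range(vmm, addr, size))
		return -EINVAL;

	vma.page = vma.refd = refd;

	/* Reference the page tables backing the range. */
	ret = nvkm_vmm_raw_get(vmm, addr, size, refd);
	if (ret)
		return ret;

	/* Write the PTEs; offset 0 into the memory object is arbitrary. */
	ret = nvkm_memory_map(memory, 0, vmm, &vma, argv, argc);
	nvkm_memory_unref(&vma.memory);
	if (ret)
		goto put;

	/* Invalidate the PTEs again (non-sparse)... */
	nvkm_vmm_raw_unmap(vmm, addr, size, false, refd);
put:
	/* ...and drop the page-table references. */
	nvkm_vmm_raw_put(vmm, addr, size, refd);
	return ret;
}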

View file

@ -287,15 +287,17 @@ gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
ret = nvkm_memory_tags_get(memory, device, tags,
nvkm_ltc_tags_clear,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
if (!map->no_comp) {
ret = nvkm_memory_tags_get(memory, device, tags,
nvkm_ltc_tags_clear,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
}
if (map->tags->mn) {
if (!map->no_comp && map->tags->mn) {
u64 tags = map->tags->mn->offset + (map->offset >> 17);
if (page->shift == 17 || !gm20x) {
map->type |= tags << 44;

View file

@ -453,15 +453,17 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
ret = nvkm_memory_tags_get(memory, device, tags,
nvkm_ltc_tags_clear,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
if (!map->no_comp) {
ret = nvkm_memory_tags_get(memory, device, tags,
nvkm_ltc_tags_clear,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
}
if (map->tags->mn) {
if (!map->no_comp && map->tags->mn) {
tags = map->tags->mn->offset + (map->offset >> 16);
map->ctag |= ((1ULL << page->shift) >> 16) << 36;
map->type |= tags << 36;

View file

@ -296,19 +296,22 @@ nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
ret = nvkm_memory_tags_get(memory, device, tags, NULL,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
if (!map->no_comp) {
ret = nvkm_memory_tags_get(memory, device, tags, NULL,
&map->tags);
if (ret) {
VMM_DEBUG(vmm, "comp %d", ret);
return ret;
}
if (map->tags->mn) {
u32 tags = map->tags->mn->offset + (map->offset >> 16);
map->ctag |= (u64)comp << 49;
map->type |= (u64)comp << 47;
map->type |= (u64)tags << 49;
map->next |= map->ctag;
if (map->tags->mn) {
u32 tags = map->tags->mn->offset +
(map->offset >> 16);
map->ctag |= (u64)comp << 49;
map->type |= (u64)comp << 47;
map->type |= (u64)tags << 49;
map->next |= map->ctag;
}
}
}

View file

@ -118,6 +118,9 @@ struct st7789_panel_info {
u32 bus_format;
u32 bus_flags;
bool invert_mode;
bool partial_mode;
u16 partial_start;
u16 partial_end;
};
struct st7789v {
@ -126,6 +129,7 @@ struct st7789v {
struct spi_device *spi;
struct gpio_desc *reset;
struct regulator *power;
enum drm_panel_orientation orientation;
};
enum st7789v_prefix {
@ -275,6 +279,21 @@ static const struct drm_display_mode et028013dma_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
.clock = 6000,
.hdisplay = 240,
.hsync_start = 240 + 28,
.hsync_end = 240 + 28 + 10,
.htotal = 240 + 28 + 10 + 10,
.vdisplay = 280,
.vsync_start = 280 + 8,
.vsync_end = 280 + 8 + 4,
.vtotal = 280 + 8 + 4 + 4,
.width_mm = 43,
.height_mm = 37,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct st7789_panel_info default_panel = {
.mode = &default_mode,
.invert_mode = true,
@ -299,6 +318,17 @@ static const struct st7789_panel_info et028013dma_panel = {
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};
static const struct st7789_panel_info jt240mhqs_hwt_ek_e3_panel = {
.mode = &jt240mhqs_hwt_ek_e3_mode,
.invert_mode = true,
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
.bus_flags = DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
.partial_mode = true,
.partial_start = 38,
.partial_end = 318,
};
static int st7789v_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@ -325,15 +355,33 @@ static int st7789v_get_modes(struct drm_panel *panel,
drm_display_info_set_bus_formats(&connector->display_info,
&ctx->info->bus_format, 1);
/*
* TODO: Remove once all drm drivers call
* drm_connector_set_orientation_from_panel()
*/
drm_connector_set_panel_orientation(connector, ctx->orientation);
return 1;
}
static enum drm_panel_orientation st7789v_get_orientation(struct drm_panel *p)
{
struct st7789v *ctx = panel_to_st7789v(p);
return ctx->orientation;
}
static int st7789v_prepare(struct drm_panel *panel)
{
struct st7789v *ctx = panel_to_st7789v(panel);
u8 pixel_fmt, polarity;
u8 mode, pixel_fmt, polarity;
int ret;
if (!ctx->info->partial_mode)
mode = ST7789V_RGBCTRL_WO;
else
mode = 0;
switch (ctx->info->bus_format) {
case MEDIA_BUS_FMT_RGB666_1X18:
pixel_fmt = MIPI_DCS_PIXEL_FMT_18BIT;
@ -473,6 +521,37 @@ static int st7789v_prepare(struct drm_panel *panel)
MIPI_DCS_EXIT_INVERT_MODE));
}
if (ctx->info->partial_mode) {
u8 area_data[4] = {
(ctx->info->partial_start >> 8) & 0xff,
(ctx->info->partial_start >> 0) & 0xff,
((ctx->info->partial_end - 1) >> 8) & 0xff,
((ctx->info->partial_end - 1) >> 0) & 0xff,
};
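/* For the JT240MHQS-HWT-EK-E3 (partial_start 38, partial_end 318) this
 * works out to { 0x00, 0x26, 0x01, 0x3d }, i.e. rows 38..317 inclusive.
 */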
/* Caution: if userspace ever pushes a mode different from the
* expected one (i.e., the one advertised by get_modes), we'll
* add margins.
*/
ST7789V_TEST(ret, st7789v_write_command(
ctx, MIPI_DCS_ENTER_PARTIAL_MODE));
ST7789V_TEST(ret, st7789v_write_command(
ctx, MIPI_DCS_SET_PAGE_ADDRESS));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
ST7789V_TEST(ret, st7789v_write_command(
ctx, MIPI_DCS_SET_PARTIAL_ROWS));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[0]));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[1]));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[2]));
ST7789V_TEST(ret, st7789v_write_data(ctx, area_data[3]));
}
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
ST7789V_RAMCTRL_RM_RGB));
@ -480,7 +559,7 @@ static int st7789v_prepare(struct drm_panel *panel)
ST7789V_RAMCTRL_MAGIC));
ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
ST7789V_TEST(ret, st7789v_write_data(ctx, mode |
ST7789V_RGBCTRL_RCM(2) |
polarity));
ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
@ -519,11 +598,12 @@ static int st7789v_unprepare(struct drm_panel *panel)
}
static const struct drm_panel_funcs st7789v_drm_funcs = {
.disable = st7789v_disable,
.enable = st7789v_enable,
.get_modes = st7789v_get_modes,
.prepare = st7789v_prepare,
.unprepare = st7789v_unprepare,
.disable = st7789v_disable,
.enable = st7789v_enable,
.get_modes = st7789v_get_modes,
.get_orientation = st7789v_get_orientation,
.prepare = st7789v_prepare,
.unprepare = st7789v_unprepare,
};
static int st7789v_probe(struct spi_device *spi)
@ -563,6 +643,8 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
drm_panel_add(&ctx->panel);
return 0;
@ -579,6 +661,7 @@ static const struct spi_device_id st7789v_spi_id[] = {
{ "st7789v", (unsigned long) &default_panel },
{ "t28cp45tn89-v17", (unsigned long) &t28cp45tn89_panel },
{ "et028013dma", (unsigned long) &et028013dma_panel },
{ "jt240mhqs-hwt-ek-e3", (unsigned long) &jt240mhqs_hwt_ek_e3_panel },
{ }
};
MODULE_DEVICE_TABLE(spi, st7789v_spi_id);
@ -587,6 +670,8 @@ static const struct of_device_id st7789v_of_match[] = {
{ .compatible = "sitronix,st7789v", .data = &default_panel },
{ .compatible = "inanbo,t28cp45tn89-v17", .data = &t28cp45tn89_panel },
{ .compatible = "edt,et028013dma", .data = &et028013dma_panel },
{ .compatible = "jasonic,jt240mhqs-hwt-ek-e3",
.data = &jt240mhqs_hwt_ek_e3_panel },
{ }
};
MODULE_DEVICE_TABLE(of, st7789v_of_match);

View file

@ -720,6 +720,22 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
if (dma_fence_is_signaled(job->done_fence))
return DRM_GPU_SCHED_STAT_NOMINAL;
/*
* The Panfrost IRQ handler may take a long time to process an interrupt
* if another IRQ handler is hogging the processing.
* For example, the HDMI encoder driver might be stuck in its IRQ
* handler for a significant time in the case of a bad cable connection.
* In order to catch such cases and not report spurious Panfrost
* job timeouts, synchronize the IRQ handler and re-check the fence
* status.
*/
synchronize_irq(pfdev->js->irq);
if (dma_fence_is_signaled(job->done_fence)) {
dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
return DRM_GPU_SCHED_STAT_NOMINAL;
}
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
job_read(pfdev, JS_CONFIG(js)),

View file

@ -125,8 +125,6 @@ static void test_duplicates(struct kunit *test)
drm_exec_fini(&exec);
}
static void test_prepare(struct kunit *test)
{
struct drm_exec_priv *priv = test->priv;
@ -145,6 +143,8 @@ static void test_prepare(struct kunit *test)
break;
}
drm_exec_fini(&exec);
drm_gem_private_object_fini(&gobj);
}
static void test_prepare_array(struct kunit *test)
@ -165,6 +165,29 @@ static void test_prepare_array(struct kunit *test)
1);
KUNIT_EXPECT_EQ(test, ret, 0);
drm_exec_fini(&exec);
drm_gem_private_object_fini(&gobj1);
drm_gem_private_object_fini(&gobj2);
}
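/* Run two complete init/lock/fini cycles back to back to make sure drm_exec
 * state is fully reset between uses.
 */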
static void test_multiple_loops(struct kunit *test)
{
struct drm_exec exec;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec)
{
break;
}
drm_exec_fini(&exec);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec)
{
break;
}
drm_exec_fini(&exec);
KUNIT_SUCCEED(test);
}
static struct kunit_case drm_exec_tests[] = {
@ -174,6 +197,7 @@ static struct kunit_case drm_exec_tests[] = {
KUNIT_CASE(test_duplicates),
KUNIT_CASE(test_prepare),
KUNIT_CASE(test_prepare_array),
KUNIT_CASE(test_multiple_loops),
{}
};

View file

@ -8,3 +8,4 @@ ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += tests/

View file

@ -0,0 +1,4 @@
CONFIG_KUNIT=y
CONFIG_DRM=y
CONFIG_DRM_KUNIT_TEST_HELPERS=y
CONFIG_DRM_TTM_KUNIT_TEST=y

View file

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 AND MIT
obj-$(CONFIG_DRM_TTM_KUNIT_TEST) += \
ttm_device_test.o \
ttm_pool_test.o \
ttm_kunit_helpers.o

View file

@ -0,0 +1,212 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_kunit_helpers.h"
struct ttm_device_test_case {
const char *description;
bool use_dma_alloc;
bool use_dma32;
bool pools_init_expected;
};
static void ttm_device_init_basic(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_device *ttm_dev;
struct ttm_resource_manager *ttm_sys_man;
int err;
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
ttm_sys_man = &ttm_dev->sysman;
KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man);
KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt);
KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type);
KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func);
KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping,
priv->drm->anon_inode->i_mapping);
ttm_device_fini(ttm_dev);
}
static void ttm_device_init_multiple(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_device *ttm_devs;
unsigned int i, num_dev = 3;
int err;
ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
for (i = 0; i < num_dev; i++) {
err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
priv->drm->anon_inode->i_mapping);
KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq);
KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs);
KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]);
}
KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev);
for (i = 0; i < num_dev; i++)
ttm_device_fini(&ttm_devs[i]);
}
static void ttm_device_fini_basic(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_device *ttm_dev;
struct ttm_resource_manager *man;
int err;
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
KUNIT_ASSERT_NOT_NULL(test, man);
ttm_device_fini(ttm_dev);
KUNIT_ASSERT_FALSE(test, man->use_type);
KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0]));
KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]);
}
static void ttm_device_init_no_vma_man(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct drm_device *drm = priv->drm;
struct ttm_device *ttm_dev;
struct drm_vma_offset_manager *vma_man;
int err;
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
/* Let's pretend there's no VMA manager allocated */
vma_man = drm->vma_offset_manager;
drm->vma_offset_manager = NULL;
err = ttm_device_kunit_init(priv, ttm_dev, false, false);
KUNIT_EXPECT_EQ(test, err, -EINVAL);
/* Bring the manager back for a graceful cleanup */
drm->vma_offset_manager = vma_man;
}
static const struct ttm_device_test_case ttm_device_cases[] = {
{
.description = "No DMA allocations, no DMA32 required",
.use_dma_alloc = false,
.use_dma32 = false,
.pools_init_expected = false,
},
{
.description = "DMA allocations, DMA32 required",
.use_dma_alloc = true,
.use_dma32 = true,
.pools_init_expected = true,
},
{
.description = "No DMA allocations, DMA32 required",
.use_dma_alloc = false,
.use_dma32 = true,
.pools_init_expected = false,
},
{
.description = "DMA allocations, no DMA32 required",
.use_dma_alloc = true,
.use_dma32 = false,
.pools_init_expected = true,
},
};
static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc)
{
strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc);
static void ttm_device_init_pools(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
const struct ttm_device_test_case *params = test->param_value;
struct ttm_device *ttm_dev;
struct ttm_pool *pool;
struct ttm_pool_type pt;
int err;
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
err = ttm_device_kunit_init(priv, ttm_dev,
params->use_dma_alloc,
params->use_dma32);
KUNIT_ASSERT_EQ(test, err, 0);
pool = &ttm_dev->pool;
KUNIT_ASSERT_NOT_NULL(test, pool);
KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (int j = 0; j <= MAX_ORDER; ++j) {
pt = pool->caching[i].orders[j];
KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
KUNIT_EXPECT_EQ(test, pt.caching, i);
KUNIT_EXPECT_EQ(test, pt.order, j);
if (params->use_dma_alloc)
KUNIT_ASSERT_FALSE(test,
list_empty(&pt.pages));
}
}
}
ttm_device_fini(ttm_dev);
}
static struct kunit_case ttm_device_test_cases[] = {
KUNIT_CASE(ttm_device_init_basic),
KUNIT_CASE(ttm_device_init_multiple),
KUNIT_CASE(ttm_device_fini_basic),
KUNIT_CASE(ttm_device_init_no_vma_man),
KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params),
{}
};
static struct kunit_suite ttm_device_test_suite = {
.name = "ttm_device",
.init = ttm_test_devices_init,
.exit = ttm_test_devices_fini,
.test_cases = ttm_device_test_cases,
};
kunit_test_suites(&ttm_device_test_suite);
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,113 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "ttm_kunit_helpers.h"
struct ttm_device_funcs ttm_dev_funcs = {
};
EXPORT_SYMBOL_GPL(ttm_dev_funcs);
int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_device *ttm,
bool use_dma_alloc,
bool use_dma32)
{
struct drm_device *drm = priv->drm;
int err;
err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev,
drm->anon_inode->i_mapping,
drm->vma_offset_manager,
use_dma_alloc, use_dma32);
return err;
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size)
{
struct drm_gem_object gem_obj = { .size = size };
struct ttm_buffer_object *bo;
bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, bo);
bo->base = gem_obj;
bo->bdev = devs->ttm_dev;
return bo;
}
EXPORT_SYMBOL_GPL(ttm_bo_kunit_init);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test)
{
struct ttm_test_devices *devs;
devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, devs);
devs->dev = drm_kunit_helper_alloc_device(test);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev);
devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev,
sizeof(*devs->drm), 0,
DRIVER_GEM);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm);
return devs;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_basic);
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
{
struct ttm_test_devices *devs;
struct ttm_device *ttm_dev;
int err;
devs = ttm_test_devices_basic(test);
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
err = ttm_device_kunit_init(devs, ttm_dev, false, false);
KUNIT_ASSERT_EQ(test, err, 0);
devs->ttm_dev = ttm_dev;
return devs;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_all);
void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs)
{
if (devs->ttm_dev)
ttm_device_fini(devs->ttm_dev);
drm_kunit_helper_free_device(test, devs->dev);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_put);
int ttm_test_devices_init(struct kunit *test)
{
struct ttm_test_devices *priv;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, priv);
priv = ttm_test_devices_basic(test);
test->priv = priv;
return 0;
}
EXPORT_SYMBOL_GPL(ttm_test_devices_init);
void ttm_test_devices_fini(struct kunit *test)
{
ttm_test_devices_put(test, test->priv);
}
EXPORT_SYMBOL_GPL(ttm_test_devices_fini);
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0 AND MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef TTM_KUNIT_HELPERS_H
#define TTM_KUNIT_HELPERS_H
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_kunit_helpers.h>
#include <kunit/test.h>
extern struct ttm_device_funcs ttm_dev_funcs;
struct ttm_test_devices {
struct drm_device *drm;
struct device *dev;
struct ttm_device *ttm_dev;
};
/* Building blocks for test-specific init functions */
int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_device *ttm,
bool use_dma_alloc,
bool use_dma32);
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size);
struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test);
struct ttm_test_devices *ttm_test_devices_all(struct kunit *test);
void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs);
/* Generic init/fini for tests that only need DRM/TTM devices */
int ttm_test_devices_init(struct kunit *test);
void ttm_test_devices_fini(struct kunit *test);
#endif // TTM_KUNIT_HELPERS_H
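As a rough usage sketch (hypothetical test code; the example_* names are invented for illustration), a suite built on these helpers takes the fully initialised devices in its init hook, builds a mock BO, and tears everything down in exit:

static int example_suite_init(struct kunit *test)
{
	/* DRM device, backing struct device and a ready-to-use TTM device. */
	test->priv = ttm_test_devices_all(test);
	return 0;
}

static void example_suite_exit(struct kunit *test)
{
	ttm_test_devices_put(test, test->priv);
}

static void example_mock_bo_case(struct kunit *test)
{
	struct ttm_test_devices *devs = test->priv;
	struct ttm_buffer_object *bo;

	/* ttm_bo_kunit_init() fills in base.size and bdev for us. */
	bo = ttm_bo_kunit_init(test, devs, PAGE_SIZE);
	KUNIT_EXPECT_EQ(test, bo->base.size, PAGE_SIZE);
	KUNIT_EXPECT_PTR_EQ(test, bo->bdev, devs->ttm_dev);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_mock_bo_case),
	{}
};

static struct kunit_suite example_suite = {
	.name = "ttm_helpers_example",
	.init = example_suite_init,
	.exit = example_suite_exit,
	.test_cases = example_cases,
};

kunit_test_suites(&example_suite);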

View file

@ -0,0 +1,437 @@
// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/mm.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_pool.h>
#include "ttm_kunit_helpers.h"
struct ttm_pool_test_case {
const char *description;
unsigned int order;
bool use_dma_alloc;
};
struct ttm_pool_test_priv {
struct ttm_test_devices *devs;
/* Used to create mock ttm_tts */
struct ttm_buffer_object *mock_bo;
};
static struct ttm_operation_ctx simple_ctx = {
.interruptible = true,
.no_wait_gpu = false,
};
static int ttm_pool_test_init(struct kunit *test)
{
struct ttm_pool_test_priv *priv;
priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, priv);
priv->devs = ttm_test_devices_basic(test);
test->priv = priv;
return 0;
}
static void ttm_pool_test_fini(struct kunit *test)
{
struct ttm_pool_test_priv *priv = test->priv;
ttm_test_devices_put(test, priv->devs);
}
static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
uint32_t page_flags,
enum ttm_caching caching,
size_t size)
{
struct ttm_pool_test_priv *priv = test->priv;
struct ttm_buffer_object *bo;
struct ttm_tt *tt;
int err;
bo = ttm_bo_kunit_init(test, priv->devs, size);
KUNIT_ASSERT_NOT_NULL(test, bo);
priv->mock_bo = bo;
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
KUNIT_ASSERT_EQ(test, err, 0);
return tt;
}
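/* Allocate and immediately free a TT so the returned pages end up cached in
 * the pool's per-order lists, giving the assertions below something to find.
 */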
static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
size_t size,
enum ttm_caching caching)
{
struct ttm_pool_test_priv *priv = test->priv;
struct ttm_test_devices *devs = priv->devs;
struct ttm_pool *pool;
struct ttm_tt *tt;
unsigned long order = __fls(size / PAGE_SIZE);
int err;
tt = ttm_tt_kunit_init(test, order, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
return pool;
}
static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
{
.description = "One page",
.order = 0,
},
{
.description = "More than one page",
.order = 2,
},
{
.description = "Above the allocation limit",
.order = MAX_ORDER + 1,
},
{
.description = "One page, with coherent DMA mappings enabled",
.order = 0,
.use_dma_alloc = true,
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
.order = MAX_ORDER + 1,
.use_dma_alloc = true,
},
};
static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
char *desc)
{
strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
ttm_pool_alloc_case_desc);
static void ttm_pool_alloc_basic(struct kunit *test)
{
struct ttm_pool_test_priv *priv = test->priv;
struct ttm_test_devices *devs = priv->devs;
const struct ttm_pool_test_case *params = test->param_value;
struct ttm_tt *tt;
struct ttm_pool *pool;
struct page *fst_page, *last_page;
enum ttm_caching caching = ttm_uncached;
unsigned int expected_num_pages = 1 << params->order;
size_t size = expected_num_pages * PAGE_SIZE;
int err;
tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
false);
KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
fst_page = tt->pages[0];
last_page = tt->pages[tt->num_pages - 1];
if (params->order <= MAX_ORDER) {
if (params->use_dma_alloc) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
} else {
KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
}
} else {
if (params->use_dma_alloc) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NULL(test, (void *)last_page->private);
} else {
/*
* We expect to alloc one big block, followed by
* order 0 blocks
*/
KUNIT_ASSERT_EQ(test, fst_page->private,
min_t(unsigned int, MAX_ORDER,
params->order));
KUNIT_ASSERT_EQ(test, last_page->private, 0);
}
}
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
ttm_pool_fini(pool);
}
static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
{
struct ttm_pool_test_priv *priv = test->priv;
struct ttm_test_devices *devs = priv->devs;
const struct ttm_pool_test_case *params = test->param_value;
struct ttm_tt *tt;
struct ttm_pool *pool;
struct ttm_buffer_object *bo;
dma_addr_t dma1, dma2;
enum ttm_caching caching = ttm_uncached;
unsigned int expected_num_pages = 1 << params->order;
size_t size = expected_num_pages * PAGE_SIZE;
int err;
tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, tt);
bo = ttm_bo_kunit_init(test, devs, size);
KUNIT_ASSERT_NOT_NULL(test, bo);
err = ttm_sg_tt_init(tt, bo, 0, caching);
KUNIT_ASSERT_EQ(test, err, 0);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
dma1 = tt->dma_address[0];
dma2 = tt->dma_address[tt->num_pages - 1];
KUNIT_ASSERT_NOT_NULL(test, (void *)dma1);
KUNIT_ASSERT_NOT_NULL(test, (void *)dma2);
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
ttm_pool_fini(pool);
}
static void ttm_pool_alloc_order_caching_match(struct kunit *test)
{
struct ttm_tt *tt;
struct ttm_pool *pool;
struct ttm_pool_type *pt;
enum ttm_caching caching = ttm_uncached;
unsigned int order = 0;
size_t size = PAGE_SIZE;
int err;
pool = ttm_pool_pre_populated(test, size, caching);
pt = &pool->caching[caching].orders[order];
KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
ttm_pool_fini(pool);
}
static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
{
struct ttm_tt *tt;
struct ttm_pool *pool;
struct ttm_pool_type *pt_pool, *pt_tt;
enum ttm_caching tt_caching = ttm_uncached;
enum ttm_caching pool_caching = ttm_cached;
size_t size = PAGE_SIZE;
unsigned int order = 0;
int err;
pool = ttm_pool_pre_populated(test, size, pool_caching);
pt_pool = &pool->caching[pool_caching].orders[order];
pt_tt = &pool->caching[tt_caching].orders[order];
tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
ttm_pool_fini(pool);
}
static void ttm_pool_alloc_order_mismatch(struct kunit *test)
{
struct ttm_tt *tt;
struct ttm_pool *pool;
struct ttm_pool_type *pt_pool, *pt_tt;
enum ttm_caching caching = ttm_uncached;
unsigned int order = 2;
size_t fst_size = (1 << order) * PAGE_SIZE;
size_t snd_size = PAGE_SIZE;
int err;
pool = ttm_pool_pre_populated(test, fst_size, caching);
pt_pool = &pool->caching[caching].orders[order];
pt_tt = &pool->caching[caching].orders[0];
tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
KUNIT_ASSERT_NOT_NULL(test, tt);
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
ttm_pool_fini(pool);
}
static void ttm_pool_free_dma_alloc(struct kunit *test)
{
struct ttm_pool_test_priv *priv = test->priv;
struct ttm_test_devices *devs = priv->devs;
struct ttm_tt *tt;
struct ttm_pool *pool;
struct ttm_pool_type *pt;
enum ttm_caching caching = ttm_uncached;
unsigned int order = 2;
size_t size = (1 << order) * PAGE_SIZE;
tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
ttm_pool_fini(pool);
}
static void ttm_pool_free_no_dma_alloc(struct kunit *test)
{
struct ttm_pool_test_priv *priv = test->priv;
struct ttm_test_devices *devs = priv->devs;
struct ttm_tt *tt;
struct ttm_pool *pool;
struct ttm_pool_type *pt;
enum ttm_caching caching = ttm_uncached;
unsigned int order = 2;
size_t size = (1 << order) * PAGE_SIZE;
tt = ttm_tt_kunit_init(test, 0, caching, size);
KUNIT_ASSERT_NOT_NULL(test, tt);
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
ttm_pool_free(pool, tt);
ttm_tt_fini(tt);
KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
ttm_pool_fini(pool);
}
static void ttm_pool_fini_basic(struct kunit *test)
{
struct ttm_pool *pool;
struct ttm_pool_type *pt;
enum ttm_caching caching = ttm_uncached;
unsigned int order = 0;
size_t size = PAGE_SIZE;
pool = ttm_pool_pre_populated(test, size, caching);
pt = &pool->caching[caching].orders[order];
KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
ttm_pool_fini(pool);
KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
}
static struct kunit_case ttm_pool_test_cases[] = {
KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
ttm_pool_alloc_basic_gen_params),
KUNIT_CASE(ttm_pool_alloc_order_caching_match),
KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
KUNIT_CASE(ttm_pool_alloc_order_mismatch),
KUNIT_CASE(ttm_pool_free_dma_alloc),
KUNIT_CASE(ttm_pool_free_no_dma_alloc),
KUNIT_CASE(ttm_pool_fini_basic),
{}
};
static struct kunit_suite ttm_pool_test_suite = {
.name = "ttm_pool",
.init = ttm_pool_test_init,
.exit = ttm_pool_test_fini,
.test_cases = ttm_pool_test_cases,
};
kunit_test_suites(&ttm_pool_test_suite);
MODULE_LICENSE("GPL");

View file

@ -259,7 +259,7 @@ static const struct of_device_id tve200_of_match[] = {
static struct platform_driver tve200_driver = {
.driver = {
.name = "tve200",
.of_match_table = of_match_ptr(tve200_of_match),
.of_match_table = tve200_of_match,
},
.probe = tve200_probe,
.remove_new = tve200_remove,

View file

@ -3,11 +3,9 @@ config VIDEO_VIVID
tristate "Virtual Video Test Driver"
depends on VIDEO_DEV && !SPARC32 && !SPARC64 && FB
depends on HAS_DMA
select FB_IOMEM_HELPERS
select FONT_SUPPORT
select FONT_8x16
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
select VIDEO_V4L2_TPG

View file

@ -246,12 +246,10 @@ static int vivid_fb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops vivid_fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = vivid_fb_check_var,
.fb_set_par = vivid_fb_set_par,
.fb_setcolreg = vivid_fb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_cursor = NULL,
.fb_ioctl = vivid_fb_ioctl,
.fb_pan_display = vivid_fb_pan_display,

View file

@ -64,9 +64,7 @@ config FB_MACMODES
config FB_GRVGA
tristate "Aeroflex Gaisler framebuffer support"
depends on FB && SPARC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
@ -139,9 +137,7 @@ config FB_ARMCLCD
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
This is the frame buffer device driver for the Acorn VIDC graphics
hardware found in Acorn RISC PCs and other ARM-based machines. If
@ -179,9 +175,7 @@ config FB_IMX
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_MXC || COMPILE_TEST
select LCD_CLASS_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
@ -228,9 +222,7 @@ config FB_Q40
bool
depends on (FB = y) && Q40
default y
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
config FB_AMIGA
tristate "Amiga native chipset support"
@ -271,9 +263,7 @@ config FB_AMIGA_AGA
config FB_FM2
bool "Amiga FrameMaster II/Rainbow II support"
depends on (FB = y) && ZORRO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
This is the frame buffer device driver for the Amiga FrameMaster
card from BSC (exhibited 1992 but not shipped as a CBM product).
@ -309,9 +299,7 @@ config FB_OF
depends on FB && PPC && (!PPC_PSERIES || PCI)
depends on !DRM_OFDRM
select APERTURE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MACMODES
help
Say Y if you want support with Open Firmware for your graphics
@ -331,9 +319,7 @@ config FB_CONTROL
config FB_PLATINUM
bool "Apple \"platinum\" display support"
depends on (FB = y) && PPC_PMAC && PPC32
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MACMODES
help
This driver supports a frame buffer for the "platinum" graphics
@ -342,9 +328,7 @@ config FB_PLATINUM
config FB_VALKYRIE
bool "Apple \"valkyrie\" display support"
depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MACMODES
help
This driver supports a frame buffer for the "valkyrie" graphics
@ -353,9 +337,7 @@ config FB_VALKYRIE
config FB_CT65550
bool "Chips 65550 display support"
depends on (FB = y) && PPC32 && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Chips & Technologies
@ -364,9 +346,7 @@ config FB_CT65550
config FB_ASILIANT
bool "Asiliant (Chips) 69000 display support"
depends on (FB = y) && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Asiliant 69030 chipset
@ -419,9 +399,7 @@ config FB_STI
config FB_MAC
bool "Generic Macintosh display support"
depends on (FB = y) && MAC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MACMODES
config FB_HP300
@ -458,9 +436,7 @@ config FB_UVESA
tristate "Userspace VESA VGA graphics support"
depends on FB && CONNECTOR
depends on !UML
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
help
This is the frame buffer driver for generic VBE 2.0 compliant
@ -477,9 +453,7 @@ config FB_VESA
bool "VESA VGA graphics support"
depends on (FB = y) && X86
select APERTURE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select SYSFB
help
This is the frame buffer device driver for generic VESA 2.0
@ -492,9 +466,7 @@ config FB_EFI
depends on (FB = y) && !IA64 && EFI
select APERTURE_HELPERS
select DRM_PANEL_ORIENTATION_QUIRKS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select SYSFB
help
This is the EFI frame buffer device driver. If the firmware on
@ -639,9 +611,7 @@ config FB_XVR500
config FB_XVR2500
bool "Sun XVR-2500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the framebuffer device for the Sun XVR-2500 and similar
@ -653,9 +623,7 @@ config FB_XVR2500
config FB_XVR1000
bool "Sun XVR-1000 support"
depends on (FB = y) && SPARC64
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
This is the framebuffer device for the Sun XVR-1000 and similar
graphics cards. The driver only works on sparc64 systems where
@ -688,9 +656,7 @@ config FB_PVR2
config FB_OPENCORES
tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
depends on FB && HAS_DMA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
This enables support for the OpenCores VGA/LCD core.
@ -717,9 +683,7 @@ config FB_ATMEL
depends on FB && OF && HAVE_CLK && HAS_IOMEM
depends on HAVE_FB_ATMEL || COMPILE_TEST
select FB_BACKLIGHT
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@ -823,10 +787,8 @@ config FB_RIVA_BACKLIGHT
config FB_I740
tristate "Intel740 support"
depends on FB && PCI
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VGASTATE
select VIDEO_NOMODESET
select FB_DDC
@ -1104,10 +1066,8 @@ config FB_RADEON_DEBUG
config FB_ATY128
tristate "ATI Rage128 display support"
depends on FB && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
select FB_IOMEM_HELPERS
select FB_MACMODES if PPC_PMAC
select VIDEO_NOMODESET
help
@ -1326,9 +1286,7 @@ config FB_NEOMAGIC
config FB_KYRO
tristate "IMG Kyro support"
depends on FB && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Say Y here if you have a STG4000 / Kyro / PowerVR 3 based
@ -1372,9 +1330,7 @@ config FB_VOODOO1
tristate "3Dfx Voodoo Graphics (sst1) support"
depends on FB && PCI
depends on FB_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
@ -1457,9 +1413,7 @@ config FB_PM3
config FB_CARMINE
tristate "Fujitsu carmine frame buffer support"
depends on FB && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
This is the frame buffer device driver for the Fujitsu Carmine chip.
@ -1551,9 +1505,7 @@ config FB_HIT
config FB_PMAG_AA
tristate "PMAG-AA TURBOchannel framebuffer support"
depends on FB && TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
used mainly in the MIPS-based DECstation series.
@ -1561,9 +1513,7 @@ config FB_PMAG_AA
config FB_PMAG_BA
tristate "PMAG-BA TURBOchannel framebuffer support"
depends on FB && TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
used mainly in the MIPS-based DECstation series.
@ -1571,9 +1521,7 @@ config FB_PMAG_BA
config FB_PMAGB_B
tristate "PMAGB-B TURBOchannel framebuffer support"
depends on FB && TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Support for the PMAGB-B TURBOchannel framebuffer card used mainly
in the MIPS-based DECstation series. The card is currently only
@ -1582,9 +1530,7 @@ config FB_PMAGB_B
config FB_MAXINE
bool "Maxine (Personal DECstation) onboard framebuffer support"
depends on (FB = y) && MACH_DECSTATION
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Support for the onboard framebuffer (1024x768x8) in the Personal
DECstation series (Personal DECstation 5000/20, /25, /33, /50,
@ -1593,9 +1539,7 @@ config FB_MAXINE
config FB_G364
bool "G364 frame buffer support"
depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
The G364 driver is the framebuffer used in MIPS Magnum 4000 and
Olivetti M700-10 systems.
@ -1614,9 +1558,7 @@ config FB_PXA168
tristate "PXA168/910 LCD framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on CPU_PXA168 || CPU_PXA910 || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Frame buffer driver for the built-in LCD controller in the Marvell
MMP processor.
@ -1624,9 +1566,7 @@ config FB_PXA168
config FB_PXA
tristate "PXA LCD framebuffer support"
depends on FB && ARCH_PXA
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEOMODE_HELPERS if OF
select FB_MODE_HELPERS if OF
help
@ -1677,10 +1617,8 @@ config PXA3XX_GCU
config FB_FSL_DIU
tristate "Freescale DIU framebuffer support"
depends on FB && FSL_SOC
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select PPC_LIB_RHEAP
help
Framebuffer driver for the Freescale SoC DIU
@@ -1703,9 +1641,7 @@ config FB_S3C
tristate "Samsung S3C framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_S3C64XX || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Frame buffer driver for the built-in FB controller in the Samsung
SoC line such as the S3C6400 and S3C6410.
@@ -1774,9 +1710,7 @@ config FB_UDL
config FB_IBM_GXT4500
tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
depends on FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Say Y here to enable support for the IBM GXT4000P/6000P and
@@ -1808,9 +1742,7 @@ config FB_PS3_DEFAULT_SIZE_M
config FB_XILINX
tristate "Xilinx frame buffer support"
depends on FB && (MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Include support for the Xilinx ML300/ML403 reference design
framebuffer. ML300 carries a 640*480 LCD display on the board,
@@ -1820,9 +1752,7 @@ config FB_GOLDFISH
tristate "Goldfish Framebuffer"
depends on FB
depends on GOLDFISH || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Framebuffer driver for Goldfish Virtual Platform
@@ -1834,9 +1764,7 @@ config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Support for the SH7760/SH7763/SH7720/SH7721 integrated
(D)STN/TFT LCD Controller.
@@ -1849,10 +1777,8 @@ config FB_DA8XX
tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
depends on ARCH_DAVINCI_DA8XX || SOC_AM33XX || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_CFB_REV_PIXELS_IN_BYTE
select FB_IOMEM_HELPERS
select FB_MODE_HELPERS
select VIDEOMODE_HELPERS
help
@@ -1907,9 +1833,7 @@ config FB_MB862XX
tristate "Fujitsu MB862xx GDC support"
depends on FB
depends on PCI || (OF && PPC)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
@@ -1967,9 +1891,7 @@ config FB_MX3
tristate "MX3 Framebuffer support"
depends on FB && MX3_IPU
select BACKLIGHT_CLASS_DEVICE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
default y
help
This is a framebuffer device for the i.MX31 LCD Controller. So
@@ -2003,9 +1925,7 @@ config FB_SIMPLE
depends on FB
depends on !DRM_SIMPLEDRM
select APERTURE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
help
Say Y if you want support for a simple frame-buffer.


@@ -605,13 +605,11 @@ acornfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
static const struct fb_ops acornfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = acornfb_check_var,
.fb_set_par = acornfb_set_par,
.fb_setcolreg = acornfb_setcolreg,
.fb_pan_display = acornfb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/*

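The driver hunks that follow all repeat the pattern in the hunk above: the hand-rolled .fb_fillrect/.fb_copyarea/.fb_imageblit entries (and, on the Kconfig side, the three FB_CFB_* selects) are dropped in favour of the FB_DEFAULT_IOMEM_OPS initializer and a single select of FB_IOMEM_HELPERS. A minimal sketch of what a converted driver ends up with is below; the examplefb names are hypothetical, and it assumes FB_DEFAULT_IOMEM_OPS supplies the default read/write, drawing and mmap callbacks for framebuffers that live in I/O memory.

/*
 * Sketch only: a hypothetical driver after this conversion. The
 * FB_DEFAULT_IOMEM_OPS initializer is assumed to cover fb_read/fb_write,
 * the cfb_* drawing ops and fb_mmap, so only hardware-specific hooks
 * remain in the table.
 */
#include <linux/fb.h>
#include <linux/module.h>

static int examplefb_check_var(struct fb_var_screeninfo *var,
                               struct fb_info *info)
{
        return 0;       /* accept the requested mode in this sketch */
}

static int examplefb_set_par(struct fb_info *info)
{
        return 0;       /* program the hardware here */
}

static const struct fb_ops examplefb_ops = {
        .owner          = THIS_MODULE,
        FB_DEFAULT_IOMEM_OPS,
        .fb_check_var   = examplefb_check_var,
        .fb_set_par     = examplefb_set_par,
};

Because these are C designated initializers, a driver that still needs a custom drawing path can list its own callback after the macro; the later initializer wins.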

@@ -98,12 +98,10 @@ static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static const struct fb_ops asiliantfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = asiliantfb_check_var,
.fb_set_par = asiliantfb_set_par,
.fb_setcolreg = asiliantfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* Calculate the ratios for the dot clocks without using a single long long


@@ -806,14 +806,12 @@ static int atmel_lcdfb_blank(int blank_mode, struct fb_info *info)
static const struct fb_ops atmel_lcdfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = atmel_lcdfb_check_var,
.fb_set_par = atmel_lcdfb_set_par,
.fb_setcolreg = atmel_lcdfb_setcolreg,
.fb_blank = atmel_lcdfb_blank,
.fb_pan_display = atmel_lcdfb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static irqreturn_t atmel_lcdfb_interrupt(int irq, void *dev_id)


@@ -504,6 +504,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power);
static const struct fb_ops aty128fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = aty128fb_check_var,
.fb_set_par = aty128fb_set_par,
.fb_setcolreg = aty128fb_setcolreg,
@@ -511,9 +512,6 @@ static const struct fb_ops aty128fb_ops = {
.fb_blank = aty128fb_blank,
.fb_ioctl = aty128fb_ioctl,
.fb_sync = aty128fb_sync,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/*


@@ -530,10 +530,7 @@ static int init_hardware(struct carmine_hw *hw)
static const struct fb_ops carminefb_ops = {
.owner = THIS_MODULE,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = carmine_check_var,
.fb_set_par = carmine_set_par,
.fb_setcolreg = carmine_setcolreg,


@@ -82,13 +82,11 @@ static int chipsfb_blank(int blank, struct fb_info *info);
static const struct fb_ops chipsfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = chipsfb_check_var,
.fb_set_par = chipsfb_set_par,
.fb_setcolreg = chipsfb_setcolreg,
.fb_blank = chipsfb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static int chipsfb_check_var(struct fb_var_screeninfo *var,


@@ -1295,14 +1295,12 @@ static int da8xxfb_set_par(struct fb_info *info)
static const struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = fb_check_var,
.fb_set_par = da8xxfb_set_par,
.fb_setcolreg = fb_setcolreg,
.fb_pan_display = da8xx_pan_display,
.fb_ioctl = fb_ioctl,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_blank = cfb_blank,
};


@@ -277,11 +277,9 @@ static void efifb_destroy(struct fb_info *info)
static const struct fb_ops efifb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_destroy = efifb_destroy,
.fb_setcolreg = efifb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static int efifb_setup(char *options)


@@ -167,11 +167,9 @@ static int fm2fb_blank(int blank, struct fb_info *info);
static const struct fb_ops fm2fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = fm2fb_setcolreg,
.fb_blank = fm2fb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/*


@@ -1451,13 +1451,11 @@ static int fsl_diu_release(struct fb_info *info, int user)
static const struct fb_ops fsl_diu_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = fsl_diu_check_var,
.fb_set_par = fsl_diu_set_par,
.fb_setcolreg = fsl_diu_setcolreg,
.fb_pan_display = fsl_diu_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_ioctl = fsl_diu_ioctl,
.fb_open = fsl_diu_open,
.fb_release = fsl_diu_release,


@@ -112,12 +112,10 @@ static int g364fb_blank(int blank, struct fb_info *info);
static const struct fb_ops g364fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = g364fb_setcolreg,
.fb_pan_display = g364fb_pan_display,
.fb_blank = g364fb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/*


@@ -13,9 +13,7 @@ config FB_GEODE
config FB_GEODE_LX
tristate "AMD Geode LX framebuffer support"
depends on FB && FB_GEODE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
@@ -29,9 +27,7 @@ config FB_GEODE_LX
config FB_GEODE_GX
tristate "AMD Geode GX framebuffer support"
depends on FB && FB_GEODE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the
@@ -45,9 +41,7 @@ config FB_GEODE_GX
config FB_GEODE_GX1
tristate "AMD Geode GX1 framebuffer support"
depends on FB && FB_GEODE
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
select VIDEO_NOMODESET
help
Framebuffer driver for the display controller integrated into the


@@ -255,14 +255,11 @@ static int parse_panel_option(struct fb_info *info)
static const struct fb_ops gx1fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = gx1fb_check_var,
.fb_set_par = gx1fb_set_par,
.fb_setcolreg = gx1fb_setcolreg,
.fb_blank = gx1fb_blank,
/* No HW acceleration for now. */
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static struct fb_info *gx1fb_init_fbinfo(struct device *dev)


@@ -268,14 +268,11 @@ static int gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
static const struct fb_ops gxfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = gxfb_check_var,
.fb_set_par = gxfb_set_par,
.fb_setcolreg = gxfb_setcolreg,
.fb_blank = gxfb_blank,
/* No HW acceleration for now. */
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static struct fb_info *gxfb_init_fbinfo(struct device *dev)


@@ -392,14 +392,11 @@ static int lxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
static const struct fb_ops lxfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = lxfb_check_var,
.fb_set_par = lxfb_set_par,
.fb_setcolreg = lxfb_setcolreg,
.fb_blank = lxfb_blank,
/* No HW acceleration for now. */
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static struct fb_info *lxfb_init_fbinfo(struct device *dev)


@@ -162,14 +162,12 @@ static int goldfish_fb_blank(int blank, struct fb_info *info)
static const struct fb_ops goldfish_fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = goldfish_fb_check_var,
.fb_set_par = goldfish_fb_set_par,
.fb_setcolreg = goldfish_fb_setcolreg,
.fb_pan_display = goldfish_fb_pan_display,
.fb_blank = goldfish_fb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};


@@ -253,13 +253,11 @@ static int grvga_pan_display(struct fb_var_screeninfo *var,
static const struct fb_ops grvga_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = grvga_check_var,
.fb_set_par = grvga_set_par,
.fb_setcolreg = grvga_setcolreg,
.fb_pan_display = grvga_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit
};
static int grvga_parse_custom(char *options,


@@ -602,14 +602,12 @@ static const struct fb_fix_screeninfo gxt4500_fix = {
static const struct fb_ops gxt4500_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = gxt4500_check_var,
.fb_set_par = gxt4500_set_par,
.fb_setcolreg = gxt4500_setcolreg,
.fb_pan_display = gxt4500_pan_display,
.fb_blank = gxt4500_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* PCI functions */


@@ -994,14 +994,12 @@ static const struct fb_ops i740fb_ops = {
.owner = THIS_MODULE,
.fb_open = i740fb_open,
.fb_release = i740fb_release,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = i740fb_check_var,
.fb_set_par = i740fb_set_par,
.fb_setcolreg = i740fb_setcolreg,
.fb_blank = i740fb_blank,
.fb_pan_display = i740fb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* ------------------------------------------------------------------------- */


@@ -580,12 +580,10 @@ static int imxfb_blank(int blank, struct fb_info *info)
static const struct fb_ops imxfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = imxfb_check_var,
.fb_set_par = imxfb_set_par,
.fb_setcolreg = imxfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_blank = imxfb_blank,
};


@@ -661,13 +661,11 @@ static struct pci_driver kyrofb_pci_driver = {
static const struct fb_ops kyrofb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = kyrofb_check_var,
.fb_set_par = kyrofb_set_par,
.fb_setcolreg = kyrofb_setcolreg,
.fb_ioctl = kyrofb_ioctl,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)


@@ -498,10 +498,8 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static const struct fb_ops macfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = macfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static void __init macfb_setup(char *options)


@@ -107,10 +107,8 @@ static int maxinefb_setcolreg(unsigned regno, unsigned red, unsigned green,
static const struct fb_ops maxinefb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = maxinefb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
int __init maxinefb_init(void)


@@ -407,14 +407,12 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
/* framebuffer ops */
static struct fb_ops mb862xxfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_check_var = mb862xxfb_check_var,
.fb_set_par = mb862xxfb_set_par,
.fb_setcolreg = mb862xxfb_setcolreg,
.fb_blank = mb862xxfb_blank,
.fb_pan_display = mb862xxfb_pan,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_ioctl = mb862xxfb_ioctl,
};


@@ -2,9 +2,7 @@
config MMP_FB
tristate "fb driver for Marvell MMP Display Subsystem"
depends on FB
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_IOMEM_HELPERS
default y
help
fb driver for Marvell MMP Display Subsystem


@@ -454,14 +454,12 @@ static int mmpfb_blank(int blank, struct fb_info *info)
static const struct fb_ops mmpfb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_blank = mmpfb_blank,
.fb_check_var = mmpfb_check_var,
.fb_set_par = mmpfb_set_par,
.fb_setcolreg = mmpfb_setcolreg,
.fb_pan_display = mmpfb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static int modes_setup(struct mmpfb_info *fbi)


@@ -1247,13 +1247,11 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
*/
static const struct fb_ops mx3fb_ops = {
.owner = THIS_MODULE,
FB_DEFAULT_IOMEM_OPS,
.fb_set_par = mx3fb_set_par,
.fb_check_var = mx3fb_check_var,
.fb_setcolreg = mx3fb_setcolreg,
.fb_pan_display = mx3fb_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_blank = mx3fb_blank,
};

Some files were not shown because too many files have changed in this diff.
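As a closing illustration of where these defaults apply, here is a hedged probe-path sketch, not taken from any driver in this diff: the helpers selected by FB_IOMEM_HELPERS read, write and draw through info->screen_base, which these drivers point at ioremap()'ed video memory. The EXAMPLEFB_VRAM_* constants and examplefb names are hypothetical, and a real probe would also fill in the fb_var_screeninfo/fb_fix_screeninfo mode fields.

#include <linux/fb.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#define EXAMPLEFB_VRAM_BASE     0xfd000000UL    /* made-up aperture address */
#define EXAMPLEFB_VRAM_SIZE     0x00400000UL    /* made-up aperture size */

static int examplefb_probe(struct platform_device *pdev)
{
        struct fb_info *info;
        int ret;

        info = framebuffer_alloc(0, &pdev->dev);
        if (!info)
                return -ENOMEM;

        /* The I/O-memory helpers access the framebuffer via screen_base. */
        info->screen_base = ioremap(EXAMPLEFB_VRAM_BASE, EXAMPLEFB_VRAM_SIZE);
        if (!info->screen_base) {
                ret = -ENOMEM;
                goto err_release;
        }
        info->fbops = &examplefb_ops;   /* the table from the earlier sketch */
        info->fix.smem_start = EXAMPLEFB_VRAM_BASE;
        info->fix.smem_len = EXAMPLEFB_VRAM_SIZE;

        ret = register_framebuffer(info);
        if (ret)
                goto err_iounmap;

        platform_set_drvdata(pdev, info);
        return 0;

err_iounmap:
        iounmap(info->screen_base);
err_release:
        framebuffer_release(info);
        return ret;
}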