Merge drm/drm-next into drm-intel-next-queued

We need the AVI infoframe work that got merged via drm-misc

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Rodrigo Vivi 2019-01-22 14:51:36 -08:00
commit f42fb2317f
231 changed files with 4290 additions and 2289 deletions
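The API change this merge is chasing: drm_hdmi_avi_infoframe_from_display_mode() now takes the connector instead of an is_hdmi2 bool, so the helper can derive sink capabilities itself. A minimal call-site sketch of the new form, with the old call kept as a comment (the my_ prefix marks a placeholder name, not code from this merge):

	static void my_set_avi_infoframe(struct drm_connector *connector,
					 const struct drm_display_mode *mode)
	{
		struct hdmi_avi_infoframe frame;
		ssize_t err;

		/* old form: drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); */
		err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
		if (err < 0) {
			DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
			return;
		}
		/* ... pack and write the frame to the encoder's hardware ... */
	}
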

View file

@ -8,6 +8,7 @@ Required properties:
- compatible : Shall contain one of
- "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders
- "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders
- "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders
- "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders
- "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
@ -25,7 +26,7 @@ Required properties:
- clock-names: Name of the clocks. This property is model-dependent.
- The functional clock, which is mandatory for all models, shall be listed
first, and shall be named "fck".
- On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or
- On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or
DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be
named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN
numerical index.

View file

@ -7,6 +7,7 @@ Required Properties:
- "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
- "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
- "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@ -57,6 +58,7 @@ corresponding to each DU output.
R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - -
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - -

View file

@ -10,6 +10,7 @@ Required properties:
"rockchip,rk3126-vop";
"rockchip,px30-vop-lit";
"rockchip,px30-vop-big";
"rockchip,rk3066-vop";
"rockchip,rk3188-vop";
"rockchip,rk3288-vop";
"rockchip,rk3368-vop";

View file

@ -0,0 +1,52 @@
digraph T {
/* Make sure our payloads are always drawn below the driver node */
subgraph cluster_driver {
fillcolor = grey;
style = filled;
driver -> {payload1, payload2} [dir=none];
}
/* Driver malloc references */
edge [style=dashed];
driver -> port1;
driver -> port2;
driver -> port3:e;
driver -> port4;
payload1:s -> port1:e;
payload2:s -> port3:e;
edge [style=""];
subgraph cluster_topology {
label="Topology Manager";
labelloc=bottom;
/* Topology references */
mstb1 -> {port1, port2};
port1 -> mstb2;
port2 -> mstb3 -> {port3, port4};
port3 -> mstb4;
/* Malloc references */
edge [style=dashed;dir=back];
mstb1 -> {port1, port2};
port1 -> mstb2;
port2 -> mstb3 -> {port3, port4};
port3 -> mstb4;
}
driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue];
mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen;shape=oval];
mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen;shape=oval];
mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;shape=oval];
mstb4 [label="MSTB #4";style=filled;fillcolor=palegreen;shape=oval];
port1 [label="Port #1";shape=oval];
port2 [label="Port #2";shape=oval];
port3 [label="Port #3";shape=oval];
port4 [label="Port #4";shape=oval];
}

View file

@ -0,0 +1,56 @@
digraph T {
/* Make sure our payloads are always drawn below the driver node */
subgraph cluster_driver {
fillcolor = grey;
style = filled;
driver -> {payload1, payload2} [dir=none];
}
/* Driver malloc references */
edge [style=dashed];
driver -> port1;
driver -> port2;
driver -> port3:e;
driver -> port4 [color=red];
payload1:s -> port1:e;
payload2:s -> port3:e;
edge [style=""];
subgraph cluster_topology {
label="Topology Manager";
labelloc=bottom;
/* Topology references */
mstb1 -> {port1, port2};
port1 -> mstb2;
edge [color=red];
port2 -> mstb3 -> {port3, port4};
port3 -> mstb4;
edge [color=""];
/* Malloc references */
edge [style=dashed;dir=back];
mstb1 -> {port1, port2};
port1 -> mstb2;
port2 -> mstb3 -> port3;
edge [color=red];
mstb3 -> port4;
port3 -> mstb4;
}
mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen];
mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen];
mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen];
mstb4 [label="MSTB #4";style=filled;fillcolor=grey];
port1 [label="Port #1"];
port2 [label="Port #2"];
port3 [label="Port #3"];
port4 [label="Port #4";style=filled;fillcolor=grey];
driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue];
}

View file

@ -0,0 +1,59 @@
digraph T {
/* Make sure our payloads are always drawn below the driver node */
subgraph cluster_driver {
fillcolor = grey;
style = filled;
edge [dir=none];
driver -> payload1;
driver -> payload2 [penwidth=3];
edge [dir=""];
}
/* Driver malloc references */
edge [style=dashed];
driver -> port1;
driver -> port2;
driver -> port3:e;
driver -> port4 [color=grey];
payload1:s -> port1:e;
payload2:s -> port3:e [penwidth=3];
edge [style=""];
subgraph cluster_topology {
label="Topology Manager";
labelloc=bottom;
/* Topology references */
mstb1 -> {port1, port2};
port1 -> mstb2;
edge [color=grey];
port2 -> mstb3 -> {port3, port4};
port3 -> mstb4;
edge [color=""];
/* Malloc references */
edge [style=dashed;dir=back];
mstb1 -> {port1, port2};
port1 -> mstb2;
port2 -> mstb3 [penwidth=3];
mstb3 -> port3 [penwidth=3];
edge [color=grey];
mstb3 -> port4;
port3 -> mstb4;
}
mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen];
mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen];
mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;penwidth=3];
mstb4 [label="MSTB #4";style=filled;fillcolor=grey];
port1 [label="Port #1"];
port2 [label="Port #2";penwidth=5];
port3 [label="Port #3";penwidth=3];
port4 [label="Port #4";style=filled;fillcolor=grey];
driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue];
payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue];
payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue;penwidth=3];
}

View file

@ -143,6 +143,9 @@ Device Instance and Driver Handling
.. kernel-doc:: drivers/gpu/drm/drm_drv.c
:doc: driver instance overview
.. kernel-doc:: include/drm/drm_device.h
:internal:
.. kernel-doc:: include/drm/drm_drv.h
:internal:
@ -230,6 +233,15 @@ Printer
.. kernel-doc:: drivers/gpu/drm/drm_print.c
:export:
Utilities
---------
.. kernel-doc:: include/drm/drm_util.h
:doc: drm utils
.. kernel-doc:: include/drm/drm_util.h
:internal:
Legacy Support Code
===================

View file

@ -116,8 +116,6 @@ Framebuffer CMA Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
:export:
.. _drm_bridges:
Framebuffer GEM Helper Reference
================================
@ -127,6 +125,8 @@ Framebuffer GEM Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
:export:
.. _drm_bridges:
Bridges
=======
@ -208,18 +208,40 @@ Display Port Dual Mode Adaptor Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
:export:
Display Port MST Helper Functions Reference
===========================================
Display Port MST Helpers
========================
Overview
--------
.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
:doc: dp mst helper
.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
:doc: Branch device and port refcounting
Functions Reference
-------------------
.. kernel-doc:: include/drm/drm_dp_mst_helper.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
:export:
Topology Lifetime Internals
---------------------------
These functions aren't exported to drivers, but are documented here to help make
the MST topology helpers easier to understand.
.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
:functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb
drm_dp_mst_topology_put_mstb
drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port
drm_dp_mst_topology_put_port
drm_dp_mst_get_mstb_malloc drm_dp_mst_put_mstb_malloc
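As a small illustration of the malloc-reference side documented above (it mirrors what the amdgpu MST connector code later in this merge does), a driver takes a malloc reference for as long as it stores a pointer to a port and drops it when that pointer goes away; the struct and function names with a my_ prefix are placeholders:

	struct my_mst_connector {
		struct drm_connector base;
		struct drm_dp_mst_port *port;	/* stored pointer => hold a malloc ref */
	};

	static void my_bind_port(struct my_mst_connector *conn,
				 struct drm_dp_mst_port *port)
	{
		conn->port = port;
		drm_dp_mst_get_port_malloc(port);	/* keeps the memory alive */
	}

	static void my_connector_destroy(struct my_mst_connector *conn)
	{
		drm_dp_mst_put_port_malloc(conn->port);	/* drop with the last pointer */
	}
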
MIPI DSI Helper Functions Reference
===================================

View file

@ -209,6 +209,36 @@ Would be great to refactor this all into a set of small common helpers.
Contact: Daniel Vetter
Generic fbdev defio support
---------------------------
The defio support code in the fbdev core has some very specific requirements,
which means drivers need to have a special framebuffer for fbdev. This prevents
us from using the generic fbdev emulation code everywhere. The main issue is
that it uses some fields in struct page itself, which breaks shmem gem objects
(and other things).
Possible solution would be to write our own defio mmap code in the drm fbdev
emulation. It would need to fully wrap the existing mmap ops, forwarding
everything after it has done the write-protect/mkwrite trickery:
- In the drm_fbdev_fb_mmap helper, if we need defio, change the
default page prots to write-protected with something like this::
vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot);
- Set the mkwrite and fsync callbacks with similar implementations to the core
fbdev defio stuff. These should all work on plain ptes, they don't actually
require a struct page.
- Track the dirty pages in a separate structure (bitfield with one bit per page
should work) to avoid clobbering struct page.
Might be good to also have some igt testcases for this.
Contact: Daniel Vetter, Noralf Tronnes
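A minimal, hypothetical sketch of the mkwrite-plus-bitfield idea described in this entry; none of these names are existing DRM API, and the write-protecting mmap wrapper from the list above is only referenced in the comment:

	#include <linux/bitops.h>
	#include <linux/jiffies.h>
	#include <linux/mm.h>
	#include <linux/workqueue.h>

	/* assumed per-fbdev state, installed as vma->vm_private_data by the
	 * (hypothetical) drm_fbdev_fb_mmap wrapper after it write-protects
	 * vma->vm_page_prot */
	struct drm_fbdev_defio {
		unsigned long *dirty;		/* one bit per framebuffer page */
		struct delayed_work flush;	/* batches damage, like core defio */
	};

	static vm_fault_t drm_fbdev_defio_page_mkwrite(struct vm_fault *vmf)
	{
		struct drm_fbdev_defio *defio = vmf->vma->vm_private_data;

		/* record the dirty page without touching struct page */
		set_bit(vmf->pgoff, defio->dirty);
		schedule_delayed_work(&defio->flush, HZ / 30);
		return 0;	/* fault path makes the pte writable again */
	}
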
Put a reservation_object into drm_gem_object
--------------------------------------------
@ -354,9 +384,6 @@ KMS cleanups
Some of these date from the very introduction of KMS in 2008 ...
- drm_mode_config.crtc_idr is misnamed, since it contains all KMS objects. Should
be renamed to drm_mode_config.object_idr.
- drm_display_mode doesn't need to be derived from drm_mode_object. That's a
leftover from older (never merged into upstream) KMS designs where modes were
set using their ID, including support to add/remove modes.

View file

@ -4873,6 +4873,7 @@ DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux-foundation.org
L: spice-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/qxl/
@ -4909,6 +4910,13 @@ DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/
DRM DRIVER FOR TPO TPG110 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/panel/panel-tpo-tpg110.c
F: Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
M: Dave Airlie <airlied@redhat.com>
R: Sean Paul <sean@poorly.run>
@ -4917,6 +4925,16 @@ S: Odd Fixes
F: drivers/gpu/drm/udl/
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
R: Haneen Mohammed <hamohammed.sa@gmail.com>
R: Daniel Vetter <daniel@ffwll.ch>
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
L: dri-devel@lists.freedesktop.org
F: drivers/gpu/drm/vkms/
F: Documentation/gpu/vkms.rst
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: Thomas Hellstrom <thellstrom@vmware.com>
@ -4986,7 +5004,6 @@ F: Documentation/devicetree/bindings/display/atmel/
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR BRIDGE CHIPS
M: Archit Taneja <architt@codeaurora.org>
M: Andrzej Hajda <a.hajda@samsung.com>
R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
S: Maintained

View file

@ -1093,17 +1093,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
return 0;
}
static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, dma_buf_debug_show, NULL);
}
static const struct file_operations dma_buf_debug_fops = {
.open = dma_buf_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
static struct dentry *dma_buf_debugfs_dir;
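For reference (the same conversion shows up again in sync_debug.c further down): DEFINE_SHOW_ATTRIBUTE(dma_buf_debug) comes from <linux/seq_file.h> and expands to roughly the boilerplate removed here:

	static int dma_buf_debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, dma_buf_debug_show, inode->i_private);
	}

	static const struct file_operations dma_buf_debug_fops = {
		.owner   = THIS_MODULE,
		.open    = dma_buf_debug_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};
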

View file

@ -649,7 +649,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
*/
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, unsigned seqno)
spinlock_t *lock, u64 context, u64 seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

View file

@ -172,7 +172,7 @@ static bool timeline_fence_enable_signaling(struct dma_fence *fence)
static void timeline_fence_value_str(struct dma_fence *fence,
char *str, int size)
{
snprintf(str, size, "%d", fence->seqno);
snprintf(str, size, "%lld", fence->seqno);
}
static void timeline_fence_timeline_value_str(struct dma_fence *fence,

View file

@ -147,7 +147,7 @@ static void sync_print_sync_file(struct seq_file *s,
}
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
static int sync_info_debugfs_show(struct seq_file *s, void *unused)
{
struct list_head *pos;
@ -178,17 +178,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
return 0;
}
static int sync_info_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sync_debugfs_show, inode->i_private);
}
static const struct file_operations sync_info_debugfs_fops = {
.open = sync_info_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs);
static __init int sync_debugfs_init(void)
{
@ -218,7 +208,7 @@ void sync_dump(void)
};
int i;
sync_debugfs_show(&s, NULL);
sync_info_debugfs_show(&s, NULL);
for (i = 0; i < s.count; i += DUMP_CHUNK) {
if ((s.count - i) > DUMP_CHUNK) {

View file

@ -144,7 +144,7 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
} else {
struct dma_fence *fence = sync_file->fence;
snprintf(buf, len, "%s-%s%llu-%d",
snprintf(buf, len, "%s-%s%llu-%lld",
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
fence->context,
@ -258,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
i_b++;
} else {
if (pt_a->seqno - pt_b->seqno <= INT_MAX)
if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno))
add_fence(fences, &i, pt_a);
else
add_fence(fences, &i, pt_b);

View file

@ -2708,7 +2708,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
if (!amdgpu_device_has_dc_support(adev))
drm_crtc_force_disable_all(adev->ddev);
drm_helper_force_disable_all(adev->ddev);
else
drm_atomic_helper_shutdown(adev->ddev);
}

View file

@ -388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
seq_printf(m, " protected by 0x%08x on context %llu",
seq_printf(m, " protected by 0x%016llx on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");

View file

@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <drm/drm_util.h>
#define ATOM_DEBUG
#include "atom.h"

View file

@ -1682,7 +1682,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;

View file

@ -1724,7 +1724,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
dce_v11_0_audio_write_sad_regs(encoder);
dce_v11_0_audio_write_latency_fields(encoder, mode);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;

View file

@ -1423,6 +1423,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct hdmi_avi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
uint8_t *payload = buffer + 3;
@ -1430,7 +1431,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
ssize_t err;
u32 tmp;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;

View file

@ -1616,7 +1616,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
dce_v8_0_audio_write_sad_regs(encoder);
dce_v8_0_audio_write_latency_fields(encoder, mode);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;

View file

@ -1692,7 +1692,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
drm_atomic_private_obj_init(&adev->dm.atomic_obj,
drm_atomic_private_obj_init(adev->ddev,
&adev->dm.atomic_obj,
&state->base,
&dm_atomic_state_funcs);

View file

@ -191,6 +191,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
kfree(amdgpu_dm_connector);
}
@ -363,7 +364,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
amdgpu_dm_connector_funcs_reset(connector);
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
aconnector, connector->base.id, aconnector->mst_port);
drm_dp_mst_get_port_malloc(port);
DRM_DEBUG_KMS(":%d\n", connector->base.id);
@ -379,12 +382,12 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
aconnector, connector->base.id, aconnector->mst_port);
aconnector->port = NULL;
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
dc_link_remove_remote_sink(aconnector->dc_link,
aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
}
@ -395,14 +398,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_put(connector);
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
drm_kms_helper_hotplug_event(dev);
}
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@ -419,7 +414,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.destroy_connector = dm_dp_destroy_mst_connector,
.hotplug = dm_dp_mst_hotplug,
.register_connector = dm_dp_mst_register_connector
};

View file

@ -16,8 +16,10 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_plane_helper.h>
#include <linux/clk.h>
#include <linux/platform_data/simplefb.h>

View file

@ -16,12 +16,18 @@
#include <linux/clk.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include "arcpgu.h"
#include "arcpgu_regs.h"

View file

@ -51,7 +51,6 @@ arcpgu_drm_connector_helper_funcs = {
};
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = arcpgu_drm_connector_destroy,

View file

@ -270,13 +270,7 @@ static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
tm = adj->crtc_vtotal - adj->crtc_vsync_end;
DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
crtc->base.id, crtc->name,
adj->base.id, adj->name, adj->vrefresh, adj->clock,
adj->crtc_hdisplay, adj->crtc_hsync_start,
adj->crtc_hsync_end, adj->crtc_htotal,
adj->crtc_vdisplay, adj->crtc_vsync_start,
adj->crtc_vsync_end, adj->crtc_vtotal,
adj->type, adj->flags);
crtc->base.id, crtc->name, DRM_MODE_ARG(adj));
DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
/* Now compute the divider for real */

View file

@ -39,7 +39,9 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_util.h>
#include <drm/drm_crtc_helper.h>
#include "ast_drv.h"
static void ast_dirty_update(struct ast_fbdev *afbdev,
@ -191,7 +193,6 @@ static int astfb_create(struct drm_fb_helper *helper,
int size, ret;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct ast_bo *bo = NULL;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
@ -206,7 +207,6 @@ static int astfb_create(struct drm_fb_helper *helper,
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
bo = gem_to_ast_bo(gobj);
sysram = vmalloc(size);
if (!sysram)
@ -263,7 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
{
struct ast_framebuffer *afb = &afbdev->afb;
drm_crtc_force_disable_all(dev);
drm_helper_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {

View file

@ -103,7 +103,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
unsigned long pages;
u32 *pci_gart = NULL, page_base, gart_idx;
dma_addr_t bus_address = 0;
int i, j, ret = 0;
int i, j, ret = -ENOMEM;
int max_ati_pages, max_real_pages;
if (!entry) {
@ -117,7 +117,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = 1;
ret = -EFAULT;
goto done;
}
@ -160,6 +160,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
bus_address = 0;
ret = -ENOMEM;
goto done;
}
page_base = (u32) entry->busaddr[i];
@ -188,7 +189,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
page_base += ATI_PCIGART_PAGE_SIZE;
}
}
ret = 1;
ret = 0;
#if defined(__i386__) || defined(__x86_64__)
wbinvd();

View file

@ -1,3 +1,3 @@
bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_fbdev.o bochs_hw.o
bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_hw.o
obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o

View file

@ -80,12 +80,6 @@ struct bochs_device {
struct ttm_bo_device bdev;
bool initialized;
} ttm;
/* fbdev */
struct {
struct drm_framebuffer *fb;
struct drm_fb_helper helper;
} fb;
};
struct bochs_bo {
@ -121,8 +115,9 @@ int bochs_hw_init(struct drm_device *dev);
void bochs_hw_fini(struct drm_device *dev);
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode,
const struct drm_format_info *format);
struct drm_display_mode *mode);
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format);
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, u64 addr);
int bochs_hw_load_edid(struct bochs_device *bochs);
@ -141,15 +136,19 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag);
int bochs_bo_unpin(struct bochs_bo *bo);
int bochs_gem_prime_pin(struct drm_gem_object *obj);
void bochs_gem_prime_unpin(struct drm_gem_object *obj);
void *bochs_gem_prime_vmap(struct drm_gem_object *obj);
void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int bochs_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
/* bochs_kms.c */
int bochs_kms_init(struct bochs_device *bochs);
void bochs_kms_fini(struct bochs_device *bochs);
/* bochs_fbdev.c */
int bochs_fbdev_init(struct bochs_device *bochs);
void bochs_fbdev_fini(struct bochs_device *bochs);
extern const struct drm_mode_config_funcs bochs_mode_funcs;

View file

@ -16,10 +16,6 @@ static int bochs_modeset = -1;
module_param_named(modeset, bochs_modeset, int, 0444);
MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
static bool enable_fbdev = true;
module_param_named(fbdev, enable_fbdev, bool, 0444);
MODULE_PARM_DESC(fbdev, "register fbdev device");
/* ---------------------------------------------------------------------- */
/* drm interface */
@ -27,7 +23,6 @@ static void bochs_unload(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
bochs_fbdev_fini(bochs);
bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
bochs_hw_fini(dev);
@ -58,9 +53,6 @@ static int bochs_load(struct drm_device *dev)
if (ret)
goto err;
if (enable_fbdev)
bochs_fbdev_init(bochs);
return 0;
err:
@ -81,7 +73,8 @@ static const struct file_operations bochs_fops = {
};
static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@ -91,6 +84,14 @@ static struct drm_driver bochs_driver = {
.gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = bochs_gem_prime_pin,
.gem_prime_unpin = bochs_gem_prime_unpin,
.gem_prime_vmap = bochs_gem_prime_vmap,
.gem_prime_vunmap = bochs_gem_prime_vunmap,
.gem_prime_mmap = bochs_gem_prime_mmap,
};
/* ---------------------------------------------------------------------- */
@ -101,27 +102,16 @@ static int bochs_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct bochs_device *bochs = drm_dev->dev_private;
drm_kms_helper_poll_disable(drm_dev);
drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 1);
return 0;
return drm_mode_config_helper_suspend(drm_dev);
}
static int bochs_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct bochs_device *bochs = drm_dev->dev_private;
drm_helper_resume_force_mode(drm_dev);
drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0);
drm_kms_helper_poll_enable(drm_dev);
return 0;
return drm_mode_config_helper_resume(drm_dev);
}
#endif
@ -165,6 +155,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_unload;
drm_fbdev_generic_setup(dev, 32);
return ret;
err_unload:

View file

@ -1,163 +0,0 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include "bochs.h"
#include <drm/drm_gem_framebuffer_helper.h>
/* ---------------------------------------------------------------------- */
static int bochsfb_mmap(struct fb_info *info,
struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
struct bochs_bo *bo = gem_to_bochs_bo(fb_helper->fb->obj[0]);
return ttm_fbdev_mmap(vma, &bo->bo);
}
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = bochsfb_mmap,
};
static int bochsfb_create_object(struct bochs_device *bochs,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct drm_device *dev = bochs->dev;
struct drm_gem_object *gobj;
u32 size;
int ret = 0;
size = mode_cmd->pitches[0] * mode_cmd->height;
ret = bochs_gem_create(dev, size, true, &gobj);
if (ret)
return ret;
*gobj_p = gobj;
return ret;
}
static int bochsfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct bochs_device *bochs =
container_of(helper, struct bochs_device, fb.helper);
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct bochs_bo *bo = NULL;
int size, ret;
if (sizes->surface_bpp != 32)
return -EINVAL;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = sizes->surface_width * 4;
mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888;
size = mode_cmd.pitches[0] * mode_cmd.height;
/* alloc, pin & map bo */
ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
bo = gem_to_bochs_bo(gobj);
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
if (ret) {
DRM_ERROR("failed to pin fbcon\n");
ttm_bo_unreserve(&bo->bo);
return ret;
}
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
&bo->kmap);
if (ret) {
DRM_ERROR("failed to kmap fbcon\n");
ttm_bo_unreserve(&bo->bo);
return ret;
}
ttm_bo_unreserve(&bo->bo);
/* init fb device */
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
DRM_ERROR("Failed to allocate fbi: %ld\n", PTR_ERR(info));
return PTR_ERR(info);
}
info->par = &bochs->fb.helper;
fb = drm_gem_fbdev_fb_create(bochs->dev, sizes, 0, gobj, NULL);
if (IS_ERR(fb)) {
DRM_ERROR("Failed to create framebuffer: %ld\n", PTR_ERR(fb));
return PTR_ERR(fb);
}
/* setup helper */
bochs->fb.helper.fb = fb;
strcpy(info->fix.id, "bochsdrmfb");
info->fbops = &bochsfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
sizes->fb_height);
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
info->fix.smem_start = 0;
info->fix.smem_len = size;
return 0;
}
static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
.fb_probe = bochsfb_create,
};
static struct drm_framebuffer *
bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
return ERR_PTR(-EINVAL);
return drm_gem_fb_create(dev, file, mode_cmd);
}
const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
};
int bochs_fbdev_init(struct bochs_device *bochs)
{
return drm_fb_helper_fbdev_setup(bochs->dev, &bochs->fb.helper,
&bochs_fb_helper_funcs, 32, 1);
}
void bochs_fbdev_fini(struct bochs_device *bochs)
{
drm_fb_helper_fbdev_teardown(bochs->dev);
}

View file

@ -86,9 +86,16 @@ static int bochs_get_edid_block(void *data, u8 *buf,
int bochs_hw_load_edid(struct bochs_device *bochs)
{
u8 header[8];
if (!bochs->mmio)
return -1;
/* check header to detect whether edid support is enabled in qemu */
bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header));
if (drm_edid_header_is_valid(header) != 8)
return -1;
kfree(bochs->edid);
bochs->edid = drm_do_get_edid(&bochs->connector,
bochs_get_edid_block, bochs);
@ -197,8 +204,7 @@ void bochs_hw_fini(struct drm_device *dev)
}
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode,
const struct drm_format_info *format)
struct drm_display_mode *mode)
{
bochs->xres = mode->hdisplay;
bochs->yres = mode->vdisplay;
@ -206,12 +212,8 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs->stride = mode->hdisplay * (bochs->bpp / 8);
bochs->yres_virtual = bochs->fb_size / bochs->stride;
DRM_DEBUG_DRIVER("%dx%d @ %d bpp, format %c%c%c%c, vy %d\n",
DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
bochs->xres, bochs->yres, bochs->bpp,
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
(format->format >> 16) & 0xff,
(format->format >> 24) & 0xff,
bochs->yres_virtual);
bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
@ -229,6 +231,16 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
}
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format)
{
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
(format->format >> 16) & 0xff,
(format->format >> 24) & 0xff);
switch (format->format) {
case DRM_FORMAT_XRGB8888:

View file

@ -6,7 +6,10 @@
*/
#include "bochs.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
static int defx = 1024;
static int defy = 768;
@ -18,115 +21,51 @@ MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
{
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
default:
return;
}
}
static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct bochs_device *bochs =
container_of(crtc, struct bochs_device, crtc);
struct bochs_bo *bo;
u64 gpu_addr = 0;
int ret;
if (old_fb) {
bo = gem_to_bochs_bo(old_fb->obj[0]);
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n");
} else {
bochs_bo_unpin(bo);
ttm_bo_unreserve(&bo->bo);
}
}
if (WARN_ON(crtc->primary->fb == NULL))
return -EINVAL;
bo = gem_to_bochs_bo(crtc->primary->fb->obj[0]);
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
if (ret) {
ttm_bo_unreserve(&bo->bo);
return ret;
}
ttm_bo_unreserve(&bo->bo);
bochs_hw_setbase(bochs, x, y, gpu_addr);
return 0;
}
static int bochs_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y, struct drm_framebuffer *old_fb)
static void bochs_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct bochs_device *bochs =
container_of(crtc, struct bochs_device, crtc);
if (WARN_ON(crtc->primary->fb == NULL))
return -EINVAL;
bochs_hw_setmode(bochs, mode, crtc->primary->fb->format);
bochs_crtc_mode_set_base(crtc, x, y, old_fb);
return 0;
bochs_hw_setmode(bochs, &crtc->mode);
}
static void bochs_crtc_prepare(struct drm_crtc *crtc)
static void bochs_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
}
static void bochs_crtc_commit(struct drm_crtc *crtc)
static void bochs_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
}
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
static int bochs_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags,
struct drm_modeset_acquire_ctx *ctx)
{
struct bochs_device *bochs =
container_of(crtc, struct bochs_device, crtc);
struct drm_framebuffer *old_fb = crtc->primary->fb;
unsigned long irqflags;
if (crtc->state && crtc->state->event) {
unsigned long irqflags;
crtc->primary->fb = fb;
bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
if (event) {
spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
spin_lock_irqsave(&dev->event_lock, irqflags);
event = crtc->state->event;
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
return 0;
}
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs bochs_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = bochs_crtc_page_flip,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
.dpms = bochs_crtc_dpms,
.mode_set = bochs_crtc_mode_set,
.mode_set_base = bochs_crtc_mode_set_base,
.prepare = bochs_crtc_prepare,
.commit = bochs_crtc_commit,
.mode_set_nofb = bochs_crtc_mode_set_nofb,
.atomic_enable = bochs_crtc_atomic_enable,
.atomic_flush = bochs_crtc_atomic_flush,
};
static const uint32_t bochs_formats[] = {
@ -134,6 +73,59 @@ static const uint32_t bochs_formats[] = {
DRM_FORMAT_BGRX8888,
};
static void bochs_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct bochs_device *bochs = plane->dev->dev_private;
struct bochs_bo *bo;
if (!plane->state->fb)
return;
bo = gem_to_bochs_bo(plane->state->fb->obj[0]);
bochs_hw_setbase(bochs,
plane->state->crtc_x,
plane->state->crtc_y,
bo->bo.offset);
bochs_hw_setformat(bochs, plane->state->fb->format);
}
static int bochs_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct bochs_bo *bo;
if (!new_state->fb)
return 0;
bo = gem_to_bochs_bo(new_state->fb->obj[0]);
return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
}
static void bochs_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct bochs_bo *bo;
if (!old_state->fb)
return;
bo = gem_to_bochs_bo(old_state->fb->obj[0]);
bochs_bo_unpin(bo);
}
static const struct drm_plane_helper_funcs bochs_plane_helper_funcs = {
.atomic_update = bochs_plane_atomic_update,
.prepare_fb = bochs_plane_prepare_fb,
.cleanup_fb = bochs_plane_cleanup_fb,
};
static const struct drm_plane_funcs bochs_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_primary_helper_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
{
struct drm_plane *primary;
@ -146,16 +138,17 @@ static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
}
ret = drm_universal_plane_init(dev, primary, 0,
&drm_primary_helper_funcs,
&bochs_plane_funcs,
bochs_formats,
ARRAY_SIZE(bochs_formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
return NULL;
}
drm_plane_helper_add(primary, &bochs_plane_helper_funcs);
return primary;
}
@ -170,31 +163,6 @@ static void bochs_crtc_init(struct drm_device *dev)
drm_crtc_helper_add(crtc, &bochs_helper_funcs);
}
static void bochs_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void bochs_encoder_dpms(struct drm_encoder *encoder, int state)
{
}
static void bochs_encoder_prepare(struct drm_encoder *encoder)
{
}
static void bochs_encoder_commit(struct drm_encoder *encoder)
{
}
static const struct drm_encoder_helper_funcs bochs_encoder_helper_funcs = {
.dpms = bochs_encoder_dpms,
.mode_set = bochs_encoder_mode_set,
.prepare = bochs_encoder_prepare,
.commit = bochs_encoder_commit,
};
static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@ -207,7 +175,6 @@ static void bochs_encoder_init(struct drm_device *dev)
encoder->possible_crtcs = 0x1;
drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &bochs_encoder_helper_funcs);
}
@ -266,6 +233,9 @@ static const struct drm_connector_funcs bochs_connector_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void bochs_connector_init(struct drm_device *dev)
@ -287,6 +257,22 @@ static void bochs_connector_init(struct drm_device *dev)
}
}
static struct drm_framebuffer *
bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
return ERR_PTR(-EINVAL);
return drm_gem_fb_create(dev, file, mode_cmd);
}
const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int bochs_kms_init(struct bochs_device *bochs)
{
@ -309,6 +295,8 @@ int bochs_kms_init(struct bochs_device *bochs)
drm_connector_attach_encoder(&bochs->connector,
&bochs->encoder);
drm_mode_config_reset(bochs->dev);
return 0;
}

View file

@ -210,33 +210,28 @@ static void bochs_ttm_placement(struct bochs_bo *bo, int domain)
bo->placement.num_busy_placement = c;
}
static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
{
return bo->bo.offset;
}
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag)
{
struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
*gpu_addr = bochs_bo_gpu_offset(bo);
return 0;
}
bochs_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
ttm_bo_unreserve(&bo->bo);
if (ret)
return ret;
bo->pin_count = 1;
if (gpu_addr)
*gpu_addr = bochs_bo_gpu_offset(bo);
return 0;
}
@ -256,7 +251,11 @@ int bochs_bo_unpin(struct bochs_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
ttm_bo_unreserve(&bo->bo);
if (ret)
return ret;
@ -396,3 +395,52 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
drm_gem_object_put_unlocked(obj);
return 0;
}
/* ---------------------------------------------------------------------- */
int bochs_gem_prime_pin(struct drm_gem_object *obj)
{
struct bochs_bo *bo = gem_to_bochs_bo(obj);
return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
}
void bochs_gem_prime_unpin(struct drm_gem_object *obj)
{
struct bochs_bo *bo = gem_to_bochs_bo(obj);
bochs_bo_unpin(bo);
}
void *bochs_gem_prime_vmap(struct drm_gem_object *obj)
{
struct bochs_bo *bo = gem_to_bochs_bo(obj);
bool is_iomem;
int ret;
ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
if (ret)
return NULL;
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
bochs_bo_unpin(bo);
return NULL;
}
return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
void bochs_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
struct bochs_bo *bo = gem_to_bochs_bo(obj);
ttm_bo_kunmap(&bo->kmap);
bochs_bo_unpin(bo);
}
int bochs_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct bochs_bo *bo = gem_to_bochs_bo(obj);
return ttm_fbdev_mmap(vma, &bo->bo);
}

View file

@ -395,7 +395,7 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
@ -411,7 +411,7 @@ static inline void adv7533_dsi_power_off(struct adv7511 *adv)
}
static inline void adv7533_mode_set(struct adv7511 *adv,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
}

View file

@ -676,8 +676,8 @@ static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
}
static void adv7511_mode_set(struct adv7511 *adv7511,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
@ -839,8 +839,8 @@ static void adv7511_bridge_disable(struct drm_bridge *bridge)
}
static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);

View file

@ -108,7 +108,7 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
regmap_write(adv->regmap_cec, 0x27, 0x0b);
}
void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
{
struct mipi_dsi_device *dsi = adv->dsi;
int lanes, ret;

View file

@ -1082,8 +1082,8 @@ static void anx78xx_bridge_disable(struct drm_bridge *bridge)
}
static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
struct hdmi_avi_infoframe frame;
@ -1094,8 +1094,9 @@ static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&anx78xx->lock);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode,
false);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&anx78xx->connector,
adjusted_mode);
if (err) {
DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
goto unlock;

View file

@ -1361,8 +1361,8 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
}
static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_display_info *display_info = &dp->connector.display_info;

View file

@ -134,8 +134,8 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
};
/**
* drm_panel_bridge_add - Creates a drm_bridge and drm_connector that
* just calls the appropriate functions from drm_panel.
* drm_panel_bridge_add - Creates a &drm_bridge and &drm_connector that
* just calls the appropriate functions from &drm_panel.
*
* @panel: The drm_panel being wrapped. Must be non-NULL.
* @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
@ -149,9 +149,12 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* passed to drm_bridge_attach(). The drm_panel_prepare() and related
* functions can be dropped from the encoder driver (they're now
* called by the KMS helpers before calling into the encoder), along
* with connector creation. When done with the bridge,
* drm_bridge_detach() should be called as normal, then
* with connector creation. When done with the bridge (after
* drm_mode_config_cleanup() if the bridge has already been attached), then
* drm_panel_bridge_remove() to free it.
*
* See devm_drm_panel_bridge_add() for an automatically manged version of this
* function.
*/
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
u32 connector_type)
@ -210,6 +213,17 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res)
drm_panel_bridge_remove(*bridge);
}
/**
* devm_drm_panel_bridge_add - Creates a managed &drm_bridge and &drm_connector
* that just calls the appropriate functions from &drm_panel.
* @dev: device to tie the bridge lifetime to
* @panel: The drm_panel being wrapped. Must be non-NULL.
* @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be
* created.
*
* This is the managed version of drm_panel_bridge_add() which automatically
* calls drm_panel_bridge_remove() when @dev is unbound.
*/
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel,
u32 connector_type)
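A hedged usage sketch for the managed variant documented above; the panel and encoder come from the caller, the DPI connector type and error handling are only examples, and the attach call follows the three-argument drm_bridge_attach() of this kernel generation:

	/* in an encoder driver's bind/probe path */
	struct drm_bridge *bridge;
	int ret;

	bridge = devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_DPI);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* attach like any other bridge; no explicit cleanup needed, the devres
	 * release calls drm_panel_bridge_remove() when dev is unbound */
	ret = drm_bridge_attach(encoder, bridge, NULL);
	if (ret)
		return ret;
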

View file

@ -232,8 +232,8 @@ static void sii902x_bridge_enable(struct drm_bridge *bridge)
}
static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adj)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
struct regmap *regmap = sii902x->regmap;
@ -258,7 +258,8 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
if (ret)
return;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false);
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&sii902x->connector, adj);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;

View file

@ -1104,8 +1104,7 @@ static void sii8620_set_infoframes(struct sii8620 *ctx,
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
mode,
true);
NULL, mode);
if (ctx->use_packed_pixel)
frm.avi.colorspace = HDMI_COLORSPACE_YUV422;

View file

@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/*
* dw-hdmi-i2s-audio.c
*
* Copyright (c) 2017 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <drm/bridge/dw_hdmi.h>
#include <sound/hdmi-codec.h>

View file

@ -1344,7 +1344,8 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
u8 val;
/* Initialise info frame from DRM mode */
drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->connector, mode);
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
@ -1998,8 +1999,8 @@ dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
}
static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
{
struct dw_hdmi *hdmi = bridge->driver_private;

View file

@ -248,7 +248,7 @@ static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
* The controller should generate 2 frames before
* preparing the peripheral.
*/
static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode)
{
int refresh, two_frames;
@ -564,7 +564,7 @@ static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
u32 val = 0, color = 0;
@ -607,7 +607,7 @@ static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
/*
* TODO dw drv improvements
@ -642,7 +642,7 @@ static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
/* Get lane byte clock cycles. */
static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
u32 hcomponent)
{
u32 frac, lbcc;
@ -658,7 +658,7 @@ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
u32 htotal, hsa, hbp, lbcc;
@ -681,7 +681,7 @@ static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
u32 vactive, vsa, vfp, vbp;
@ -818,7 +818,7 @@ static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
}
static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *adjusted_mode)
{
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
void *priv_data = dsi->plat_data->priv_data;
@ -861,8 +861,8 @@ static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
}
static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);

View file

@ -203,7 +203,7 @@ struct tc_data {
/* display edid */
struct edid *edid;
/* current mode */
struct drm_display_mode *mode;
const struct drm_display_mode *mode;
u32 rev;
u8 assr;
@ -648,7 +648,8 @@ static int tc_get_display_props(struct tc_data *tc)
return ret;
}
static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
static int tc_set_video_mode(struct tc_data *tc,
const struct drm_display_mode *mode)
{
int ret;
int vid_sync_dly;
@ -1113,8 +1114,8 @@ static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connec
}
static void tc_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adj)
const struct drm_display_mode *mode,
const struct drm_display_mode *adj)
{
struct tc_data *tc = bridge_to_tc(bridge);

View file

@ -10,6 +10,7 @@
*/
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
@ -256,6 +257,8 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
{
struct drm_framebuffer *gfb = gfbdev->gfb;
drm_helper_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&gfbdev->helper);
vfree(gfbdev->sysram);

View file

@ -698,6 +698,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
/**
* drm_atomic_private_obj_init - initialize private object
* @dev: DRM device this object will be attached to
* @obj: private object
* @state: initial private object state
* @funcs: pointer to the struct of function pointers that identify the object
@ -707,14 +708,18 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
* driver private object that needs its own atomic state.
*/
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
drm_atomic_private_obj_init(struct drm_device *dev,
struct drm_private_obj *obj,
struct drm_private_state *state,
const struct drm_private_state_funcs *funcs)
{
memset(obj, 0, sizeof(*obj));
drm_modeset_lock_init(&obj->lock);
obj->state = state;
obj->funcs = funcs;
list_add_tail(&obj->head, &dev->mode_config.privobj_list);
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);
@ -727,7 +732,9 @@ EXPORT_SYMBOL(drm_atomic_private_obj_init);
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
list_del(&obj->head);
obj->funcs->atomic_destroy_state(obj, obj->state);
drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);
@ -737,8 +744,8 @@ EXPORT_SYMBOL(drm_atomic_private_obj_fini);
* @obj: private object to get the state for
*
* This function returns the private object state for the given private object,
* allocating the state if needed. It does not grab any locks as the caller is
* expected to care of any required locking.
* allocating the state if needed. It will also grab the relevant private
* object lock to make sure that the state is consistent.
*
* RETURNS:
*
@ -748,7 +755,7 @@ struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int index, num_objs, i;
int index, num_objs, i, ret;
size_t size;
struct __drm_private_objs_state *arr;
struct drm_private_state *obj_state;
@ -757,6 +764,10 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
if (obj == state->private_objs[i].ptr)
return state->private_objs[i].state;
ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
num_objs = state->num_private_objs + 1;
size = sizeof(*state->private_objs) * num_objs;
arr = krealloc(state->private_objs, size, GFP_KERNEL);

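A minimal sketch of the driver-facing contract implied by the two hunks above: drm_atomic_private_obj_init() now takes the &drm_device so the object can be tracked on privobj_list, and drm_atomic_get_private_obj_state() now takes the per-object lock and may therefore return -EDEADLK. All foo_* names and the bar field are hypothetical.

struct foo_priv_state {
        struct drm_private_state base;
        unsigned int bar;               /* hypothetical driver data */
};

static struct drm_private_state *
foo_duplicate_state(struct drm_private_obj *obj)
{
        struct foo_priv_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
        return &state->base;
}

static void foo_destroy_state(struct drm_private_obj *obj,
                              struct drm_private_state *state)
{
        kfree(container_of(state, struct foo_priv_state, base));
}

static const struct drm_private_state_funcs foo_priv_funcs = {
        .atomic_duplicate_state = foo_duplicate_state,
        .atomic_destroy_state = foo_destroy_state,
};

int foo_private_obj_init(struct drm_device *dev, struct drm_private_obj *obj)
{
        struct foo_priv_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state)
                return -ENOMEM;

        /* The new device argument is what links the object into privobj_list. */
        drm_atomic_private_obj_init(dev, obj, &state->base, &foo_priv_funcs);
        return 0;
}

/* In atomic check/commit code the lookup can now hit lock contention. */
static int foo_atomic_touch(struct drm_atomic_state *state,
                            struct drm_private_obj *obj)
{
        struct drm_private_state *s;

        s = drm_atomic_get_private_obj_state(state, obj);
        if (IS_ERR(s))
                return PTR_ERR(s);      /* may be -EDEADLK, caller backs off */

        return 0;
}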
View file

@ -294,8 +294,8 @@ EXPORT_SYMBOL(drm_bridge_post_disable);
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
if (!bridge)
return;

View file

@ -1138,7 +1138,71 @@ void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
/**
* drm_create_tv_properties - create TV specific connector properties
* drm_connector_attach_tv_margin_properties - attach TV connector margin properties
* @connector: DRM connector
*
* Called by a driver when it needs to attach TV margin props to a connector.
* Typically used on SDTV and HDMI connectors.
*/
void drm_connector_attach_tv_margin_properties(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
drm_object_attach_property(&connector->base,
dev->mode_config.tv_left_margin_property,
0);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_right_margin_property,
0);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_top_margin_property,
0);
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
0);
}
EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
/**
* drm_mode_create_tv_margin_properties - create TV connector margin properties
* @dev: DRM device
*
* Called by a driver's HDMI connector initialization routine, this function
* creates the TV margin properties for a given device. No need to call this
* function for an SDTV connector; it's already called from
* drm_mode_create_tv_properties().
*/
int drm_mode_create_tv_margin_properties(struct drm_device *dev)
{
if (dev->mode_config.tv_left_margin_property)
return 0;
dev->mode_config.tv_left_margin_property =
drm_property_create_range(dev, 0, "left margin", 0, 100);
if (!dev->mode_config.tv_left_margin_property)
return -ENOMEM;
dev->mode_config.tv_right_margin_property =
drm_property_create_range(dev, 0, "right margin", 0, 100);
if (!dev->mode_config.tv_right_margin_property)
return -ENOMEM;
dev->mode_config.tv_top_margin_property =
drm_property_create_range(dev, 0, "top margin", 0, 100);
if (!dev->mode_config.tv_top_margin_property)
return -ENOMEM;
dev->mode_config.tv_bottom_margin_property =
drm_property_create_range(dev, 0, "bottom margin", 0, 100);
if (!dev->mode_config.tv_bottom_margin_property)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
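A minimal sketch of how a driver is expected to combine the two new helpers during HDMI connector setup; foo_hdmi_connector_init() and its arguments are hypothetical:

static int foo_hdmi_connector_init(struct drm_device *dev,
                                   struct drm_connector *connector)
{
        int ret;

        /* Create the four margin properties once per device... */
        ret = drm_mode_create_tv_margin_properties(dev);
        if (ret)
                return ret;

        /* ...then attach them to each HDMI connector that wants them. */
        drm_connector_attach_tv_margin_properties(connector);
        return 0;
}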
/**
* drm_mode_create_tv_properties - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
* @modes: array of pointers to strings containing name of each format
@ -1183,24 +1247,7 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
/*
* Other, TV specific properties: margins & TV modes.
*/
dev->mode_config.tv_left_margin_property =
drm_property_create_range(dev, 0, "left margin", 0, 100);
if (!dev->mode_config.tv_left_margin_property)
goto nomem;
dev->mode_config.tv_right_margin_property =
drm_property_create_range(dev, 0, "right margin", 0, 100);
if (!dev->mode_config.tv_right_margin_property)
goto nomem;
dev->mode_config.tv_top_margin_property =
drm_property_create_range(dev, 0, "top margin", 0, 100);
if (!dev->mode_config.tv_top_margin_property)
goto nomem;
dev->mode_config.tv_bottom_margin_property =
drm_property_create_range(dev, 0, "bottom margin", 0, 100);
if (!dev->mode_config.tv_bottom_margin_property)
if (drm_mode_create_tv_margin_properties(dev))
goto nomem;
dev->mode_config.tv_mode_property =
@ -2077,7 +2124,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
* identifier for the tile group.
*
* RETURNS:
* new tile group or error.
* new tile group or NULL.
*/
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
char topology[8])
@ -2087,7 +2134,7 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
tg = kzalloc(sizeof(*tg), GFP_KERNEL);
if (!tg)
return ERR_PTR(-ENOMEM);
return NULL;
kref_init(&tg->refcount);
memcpy(tg->group_data, topology, 8);
@ -2099,7 +2146,7 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
tg->id = ret;
} else {
kfree(tg);
tg = ERR_PTR(ret);
tg = NULL;
}
mutex_unlock(&dev->mode_config.idr_mutex);

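Since the return convention of drm_mode_create_tile_group() changes from ERR_PTR to NULL, callers have to adjust their error checks accordingly; a small hedged sketch (foo_register_tile() is hypothetical):

static int foo_register_tile(struct drm_device *dev, char topology[8])
{
        struct drm_tile_group *tg;

        tg = drm_mode_create_tile_group(dev, topology);
        if (!tg)                /* allocation or IDR failure is now NULL */
                return -ENOMEM;

        /* ... hand tg to the connectors that share the tile ... */

        drm_mode_put_tile_group(dev, tg);
        return 0;
}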
View file

@ -361,23 +361,26 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
int tmp_handle;
if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
tmp_handle = drm_legacy_ctxbitmap_next(dev);
if (tmp_handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
ctx->handle = drm_legacy_ctxbitmap_next(dev);
tmp_handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle < 0) {
DRM_DEBUG("%d\n", tmp_handle);
if (tmp_handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
return tmp_handle;
}
ctx->handle = tmp_handle;
ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
if (!ctx_entry) {
DRM_DEBUG("out of memory\n");

View file

@ -93,15 +93,6 @@ struct drm_crtc *drm_crtc_from_index(struct drm_device *dev, int idx)
}
EXPORT_SYMBOL(drm_crtc_from_index);
/**
* drm_crtc_force_disable - Forcibly turn off a CRTC
* @crtc: CRTC to turn off
*
* Note: This should only be used by non-atomic legacy drivers.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_force_disable(struct drm_crtc *crtc)
{
struct drm_mode_set set = {
@ -112,38 +103,6 @@ int drm_crtc_force_disable(struct drm_crtc *crtc)
return drm_mode_set_config_internal(&set);
}
EXPORT_SYMBOL(drm_crtc_force_disable);
/**
* drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
* @dev: DRM device whose CRTCs to turn off
*
* Drivers may want to call this on unload to ensure that all displays are
* unlit and the GPU is in a consistent, low power state. Takes modeset locks.
*
* Note: This should only be used by non-atomic legacy drivers. For an atomic
* version look at drm_atomic_helper_shutdown().
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_crtc_force_disable_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev)
if (crtc->enabled) {
ret = drm_crtc_force_disable(crtc);
if (ret)
goto out;
}
out:
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_crtc_force_disable_all);
static unsigned int drm_num_crtcs(struct drm_device *dev)
{

View file

@ -93,6 +93,8 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
struct drm_connector_list_iter conn_iter;
struct drm_device *dev = encoder->dev;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
@ -131,6 +133,8 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
/*
* We can expect this mutex to be locked if we are not panicking.
* Locking is currently fubar in the panic handler.
@ -212,8 +216,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
*/
void drm_helper_disable_unused_functions(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_ATOMIC))
DRM_ERROR("Called for atomic driver, this is not what you want.\n");
WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
__drm_helper_disable_unused_functions(dev);
@ -281,6 +284,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_encoder *encoder;
bool ret = true;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_warn_on_modeset_not_all_locked(dev);
saved_enabled = crtc->enabled;
@ -386,9 +391,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!encoder_funcs)
continue;
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, encoder->name,
mode->base.id, mode->name);
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n",
encoder->base.id, encoder->name, mode->name);
if (encoder_funcs->mode_set)
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
@ -540,6 +544,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
crtc_funcs = set->crtc->helper_private;
dev = set->crtc->dev;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
if (!set->mode)
set->fb = NULL;
@ -555,8 +562,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
return 0;
}
dev = set->crtc->dev;
drm_warn_on_modeset_not_all_locked(dev);
/*
@ -875,6 +880,8 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode)
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
WARN_ON(drm_drv_uses_atomic_modeset(connector->dev));
if (mode == connector->dpms)
return 0;
@ -946,6 +953,8 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
int encoder_dpms;
bool ret;
WARN_ON(drm_drv_uses_atomic_modeset(dev));
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev) {
@ -984,3 +993,38 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
/**
* drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs
* @dev: DRM device whose CRTCs to turn off
*
* Drivers may want to call this on unload to ensure that all displays are
* unlit and the GPU is in a consistent, low power state. Takes modeset locks.
*
* Note: This should only be used by non-atomic legacy drivers. For an atomic
* version look at drm_atomic_helper_shutdown().
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_helper_force_disable_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
int ret = 0;
drm_modeset_lock_all(dev);
drm_for_each_crtc(crtc, dev)
if (crtc->enabled) {
struct drm_mode_set set = {
.crtc = crtc,
};
ret = drm_mode_set_config_internal(&set);
if (ret)
goto out;
}
out:
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_helper_force_disable_all);
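The cirrus hunk earlier in this diff already uses the relocated helper; as a more general, hedged sketch of a legacy (non-atomic) driver unload path, with foo_unload() hypothetical:

static void foo_unload(struct drm_device *dev)
{
        /* Turn off every enabled CRTC before tearing the pipeline down. */
        drm_helper_force_disable_all(dev);

        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
}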

View file

@ -50,6 +50,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
const struct drm_framebuffer *fb);
int drm_crtc_register_all(struct drm_device *dev);
void drm_crtc_unregister_all(struct drm_device *dev);
int drm_crtc_force_disable(struct drm_crtc *crtc);
struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc);

View file

@ -154,6 +154,7 @@ u8 drm_dp_link_rate_to_bw_code(int link_rate)
default:
WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
DP_LINK_BW_1_62);
/* fall through */
case 162000:
return DP_LINK_BW_1_62;
case 270000:
@ -171,6 +172,7 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
switch (link_bw) {
default:
WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
/* fall through */
case DP_LINK_BW_1_62:
return 162000;
case DP_LINK_BW_2_7:
@ -552,6 +554,7 @@ int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
case DP_DS_16BPC:
return 16;
}
/* fall through */
default:
return 0;
}

File diff suppressed because it is too large

View file

@ -41,7 +41,6 @@
#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"
/*
* drm_debug: Enable debug output.
@ -265,14 +264,13 @@ void drm_minor_release(struct drm_minor *minor)
* DOC: driver instance overview
*
* A device instance for a drm driver is represented by &struct drm_device. This
* is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
* is initialized with drm_dev_init(), usually from bus-specific ->probe()
* callbacks implemented by the driver. The driver then needs to initialize all
* the various subsystems for the drm device like memory management, vblank
* handling, modesetting support and initial output configuration plus obviously
* initialize all the corresponding hardware bits. An important part of this is
* also calling drm_dev_set_unique() to set the userspace-visible unique name of
* this device instance. Finally when everything is up and running and ready for
* userspace the device instance can be published using drm_dev_register().
* initialize all the corresponding hardware bits. Finally when everything is up
* and running and ready for userspace the device instance can be published
* using drm_dev_register().
*
* There is also deprecated support for initializing device instances using
* bus-specific helpers and the &drm_driver.load callback. But due to
@ -288,9 +286,6 @@ void drm_minor_release(struct drm_minor *minor)
* Note that the lifetime rules for a &drm_device instance still have a lot of
* historical baggage. Hence use the reference counting provided by
* drm_dev_get() and drm_dev_put() only carefully.
*
* It is recommended that drivers embed &struct drm_device into their own device
* structure, which is supported through drm_dev_init().
*/
/**
@ -476,6 +471,9 @@ static void drm_fs_inode_free(struct inode *inode)
* The initial ref-count of the object is 1. Use drm_dev_get() and
* drm_dev_put() to take and drop further ref-counts.
*
* It is recommended that drivers embed &struct drm_device into their own device
* structure.
*
* Drivers that do not want to allocate their own device struct
* embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
* that do embed &struct drm_device it must be placed first in the overall
@ -766,7 +764,7 @@ static void remove_compat_control_link(struct drm_device *dev)
* @flags: Flags passed to the driver's .load() function
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
* and start normal device operation. @dev must be initialized via drm_dev_init()
* previously.
*
* Never call this twice on any device!
@ -878,9 +876,9 @@ EXPORT_SYMBOL(drm_dev_unregister);
* @dev: device of which to set the unique name
* @name: unique name
*
* Sets the unique name of a DRM device using the specified string. Drivers
* can use this at driver probe time if the unique name of the devices they
* drive is static.
* Sets the unique name of a DRM device using the specified string. This is
* already done by drm_dev_init(), drivers should only override the default
* unique name for backwards compatibility reasons.
*
* Return: 0 on success or a negative error code on failure.
*/

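A minimal sketch of the embedding pattern the reworked documentation describes, assuming a platform device; struct foo_device, foo_drm_driver and foo_probe() are hypothetical:

static struct drm_driver foo_drm_driver = {
        /* .fops, .name, feature flags, etc. go here */
};

struct foo_device {
        struct drm_device drm;  /* must be the first member when embedding */
        void __iomem *mmio;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_device *foo;
        int ret;

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        ret = drm_dev_init(&foo->drm, &foo_drm_driver, &pdev->dev);
        if (ret) {
                kfree(foo);
                return ret;
        }

        /* ... set up modesetting, vblank handling and the hardware ... */

        ret = drm_dev_register(&foo->drm, 0);
        if (ret)
                drm_dev_put(&foo->drm);

        return ret;
}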
View file

@ -3641,6 +3641,20 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
return oui == HDMI_FORUM_IEEE_OUI;
}
static bool cea_db_is_vcdb(const u8 *db)
{
if (cea_db_tag(db) != USE_EXTENDED_TAG)
return false;
if (cea_db_payload_len(db) != 2)
return false;
if (cea_db_extended_tag(db) != EXT_VIDEO_CAPABILITY_BLOCK)
return false;
return true;
}
static bool cea_db_is_y420cmdb(const u8 *db)
{
if (cea_db_tag(db) != USE_EXTENDED_TAG)
@ -4223,41 +4237,6 @@ bool drm_detect_monitor_audio(struct edid *edid)
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_rgb_quant_range_selectable - is RGB quantization range selectable?
* @edid: EDID block to scan
*
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
* which quantization range (full or limited) is used.
*
* Return: True if the RGB quantization range is selectable, false otherwise.
*/
bool drm_rgb_quant_range_selectable(struct edid *edid)
{
u8 *edid_ext;
int i, start, end;
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return false;
if (cea_db_offsets(edid_ext, &start, &end))
return false;
for_each_cea_db(edid_ext, i, start, end) {
if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG &&
cea_db_payload_len(&edid_ext[i]) == 2 &&
cea_db_extended_tag(&edid_ext[i]) ==
EXT_VIDEO_CAPABILITY_BLOCK) {
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
}
}
return false;
}
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
/**
* drm_default_rgb_quant_range - default RGB quantization range
@ -4278,6 +4257,16 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", db[2]);
if (db[2] & EDID_CEA_VCDB_QS)
info->rgb_quant_range_selectable = true;
}
static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
const u8 *db)
{
@ -4452,6 +4441,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
drm_parse_hdmi_forum_vsdb(connector, db);
if (cea_db_is_y420cmdb(db))
drm_parse_y420cmdb_bitmap(connector, db);
if (cea_db_is_vcdb(db))
drm_parse_vcdb(connector, db);
}
}
@ -4472,6 +4463,7 @@ drm_reset_display_info(struct drm_connector *connector)
info->max_tmds_clock = 0;
info->dvi_dual = false;
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
@ -4830,19 +4822,32 @@ void drm_set_preferred_mode(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_set_preferred_mode);
static bool is_hdmi2_sink(struct drm_connector *connector)
{
/*
* FIXME: sil-sii8620 doesn't have a connector around when
* we need one, so we have to be prepared for a NULL connector.
*/
if (!connector)
return true;
return connector->display_info.hdmi.scdc.supported ||
connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
}
/**
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
* @frame: HDMI AVI infoframe
* @connector: the connector
* @mode: DRM display mode
* @is_hdmi2_sink: Sink is HDMI 2.0 compliant
*
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
bool is_hdmi2_sink)
struct drm_connector *connector,
const struct drm_display_mode *mode)
{
enum hdmi_picture_aspect picture_aspect;
int err;
@ -4864,7 +4869,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
* HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
* have to make sure we don't break HDMI 1.4 sinks.
*/
if (!is_hdmi2_sink && frame->video_code > 64)
if (!is_hdmi2_sink(connector) && frame->video_code > 64)
frame->video_code = 0;
/*
@ -4923,22 +4928,18 @@ EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
* quantization range information
* @frame: HDMI AVI infoframe
* @connector: the connector
* @mode: DRM display mode
* @rgb_quant_range: RGB quantization range (Q)
* @rgb_quant_range_selectable: Sink support selectable RGB quantization range (QS)
* @is_hdmi2_sink: HDMI 2.0 sink, which has different default recommendations
*
* Note that @is_hdmi2_sink can be derived by looking at the
* &drm_scdc.supported flag stored in &drm_hdmi_info.scdc,
* &drm_display_info.hdmi, which can be found in &drm_connector.display_info.
*/
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
bool rgb_quant_range_selectable,
bool is_hdmi2_sink)
enum hdmi_quantization_range rgb_quant_range)
{
const struct drm_display_info *info = &connector->display_info;
/*
* CEA-861:
* "A Source shall not send a non-zero Q value that does not correspond
@ -4949,7 +4950,7 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* HDMI 2.0 recommends sending non-zero Q when it does match the
* default RGB quantization range for the mode, even when QS=0.
*/
if (rgb_quant_range_selectable ||
if (info->rgb_quant_range_selectable ||
rgb_quant_range == drm_default_rgb_quant_range(mode))
frame->quantization_range = rgb_quant_range;
else
@ -4968,7 +4969,7 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
* on CEA-861-F.
*/
if (!is_hdmi2_sink ||
if (!is_hdmi2_sink(connector) ||
rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;

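A hedged sketch of the reworked AVI infoframe flow, where the connector now supplies the HDMI 2.0 and QS information that used to be passed in as booleans; foo_fill_avi_infoframe() and its parameters are hypothetical:

static int foo_fill_avi_infoframe(struct drm_connector *connector,
                                  const struct drm_display_mode *mode,
                                  bool limited_range, u8 *buf, size_t len)
{
        union hdmi_infoframe frame;
        int ret;

        ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
                                                       connector, mode);
        if (ret < 0)
                return ret;

        drm_hdmi_avi_infoframe_quant_range(&frame.avi, connector, mode,
                                           limited_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL);

        return hdmi_avi_infoframe_pack(&frame.avi, buf, len);
}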
View file

@ -1797,6 +1797,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int i;
struct drm_fb_helper_surface_size sizes;
int gamma_size = 0;
int best_depth = 0;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
sizes.surface_depth = 24;
@ -1804,7 +1805,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.fb_width = (u32)-1;
sizes.fb_height = (u32)-1;
/* if driver picks 8 or 16 by default use that for both depth/bpp */
/*
* If the driver picks 8 or 16 by default, use that for both depth/bpp
* to begin with
*/
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
@ -1839,6 +1843,55 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
}
/*
* If we run into a situation where, for example, the primary plane
* supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
* 16) we need to scale down the depth of the sizes we request.
*/
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
struct drm_plane *plane = crtc->primary;
int j;
DRM_DEBUG("test CRTC %d primary plane\n", i);
for (j = 0; j < plane->format_count; j++) {
const struct drm_format_info *fmt;
fmt = drm_format_info(plane->format_types[j]);
/*
* Do not consider YUV or other complicated formats
* for framebuffers. This means only legacy formats
* are supported (fmt->depth is a legacy field) but
* the framebuffer emulation can only deal with such
* formats, specifically RGB/BGR formats.
*/
if (fmt->depth == 0)
continue;
/* We found a perfect fit, great */
if (fmt->depth == sizes.surface_depth) {
best_depth = fmt->depth;
break;
}
/* Skip depths above what we're looking for */
if (fmt->depth > sizes.surface_depth)
continue;
/* Best depth found so far */
if (fmt->depth > best_depth)
best_depth = fmt->depth;
}
}
if (sizes.surface_depth != best_depth) {
DRM_INFO("requested bpp %d, scaled depth down to %d",
sizes.surface_bpp, best_depth);
sizes.surface_depth = best_depth;
}
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
@ -2866,7 +2919,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
drm_fb_helper_fbdev_teardown(dev);
return ret;
}
@ -2961,18 +3014,16 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
return 0;
}
/*
* fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
* unregister_framebuffer() or fb_release().
*/
static void drm_fbdev_fb_destroy(struct fb_info *info)
static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper *fb_helper = info->par;
struct fb_info *fbi = fb_helper->fbdev;
struct fb_ops *fbops = NULL;
void *shadow = NULL;
if (fbi->fbdefio) {
if (!fb_helper->dev)
return;
if (fbi && fbi->fbdefio) {
fb_deferred_io_cleanup(fbi);
shadow = fbi->screen_buffer;
fbops = fbi->fbops;
@ -2986,6 +3037,12 @@ static void drm_fbdev_fb_destroy(struct fb_info *info)
}
drm_client_framebuffer_delete(fb_helper->buffer);
}
static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
{
drm_fbdev_cleanup(fb_helper);
/*
* FIXME:
* Remove conditional when all CMA drivers have been moved over to using
@ -2997,6 +3054,15 @@ static void drm_fbdev_fb_destroy(struct fb_info *info)
}
}
/*
* fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
* unregister_framebuffer() or fb_release().
*/
static void drm_fbdev_fb_destroy(struct fb_info *info)
{
drm_fbdev_release(info->par);
}
static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
@ -3047,7 +3113,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
int ret;
DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
@ -3064,10 +3129,8 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fb = buffer->fb;
fbi = drm_fb_helper_alloc_fbi(fb_helper);
if (IS_ERR(fbi)) {
ret = PTR_ERR(fbi);
goto err_free_buffer;
}
if (IS_ERR(fbi))
return PTR_ERR(fbi);
fbi->par = fb_helper;
fbi->fbops = &drm_fbdev_fb_ops;
@ -3098,8 +3161,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
if (!fbops || !shadow) {
kfree(fbops);
vfree(shadow);
ret = -ENOMEM;
goto err_fb_info_destroy;
return -ENOMEM;
}
*fbops = *fbi->fbops;
@ -3111,13 +3173,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
}
return 0;
err_fb_info_destroy:
drm_fb_helper_fini(fb_helper);
err_free_buffer:
drm_client_framebuffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_generic_probe);
@ -3129,18 +3184,11 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->fbdev) {
drm_fb_helper_unregister_fbi(fb_helper);
if (fb_helper->fbdev)
/* drm_fbdev_fb_destroy() takes care of cleanup */
return;
}
/* Did drm_fb_helper_fbdev_setup() run? */
if (fb_helper->dev)
drm_fb_helper_fini(fb_helper);
drm_client_release(client);
kfree(fb_helper);
drm_fb_helper_unregister_fbi(fb_helper);
else
drm_fbdev_release(fb_helper);
}
static int drm_fbdev_client_restore(struct drm_client_dev *client)
@ -3158,7 +3206,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
struct drm_device *dev = client->dev;
int ret;
/* If drm_fb_helper_fbdev_setup() failed, we only try once */
/* Setup is not retried if it has failed */
if (!fb_helper->dev && fb_helper->funcs)
return 0;
@ -3170,15 +3218,34 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
return 0;
}
ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
fb_helper->preferred_bpp, 0);
if (ret) {
fb_helper->dev = NULL;
fb_helper->fbdev = NULL;
return ret;
}
drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector);
if (ret)
goto err;
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret)
goto err_cleanup;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
if (ret)
goto err_cleanup;
return 0;
err_cleanup:
drm_fbdev_cleanup(fb_helper);
err:
fb_helper->dev = NULL;
fb_helper->fbdev = NULL;
DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs drm_fbdev_client_funcs = {
@ -3237,6 +3304,10 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
drm_client_add(&fb_helper->client);
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
preferred_bpp = 32;
fb_helper->preferred_bpp = preferred_bpp;
ret = drm_fbdev_client_hotplug(&fb_helper->client);

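The kirin conversion later in this diff shows the intended driver-side call; a hedged generic sketch, with foo_bind() hypothetical:

static int foo_bind(struct drm_device *drm)
{
        int ret;

        ret = drm_dev_register(drm, 0);
        if (ret)
                return ret;

        /* Passing 0 falls back to preferred_depth and then to 32 bpp. */
        drm_fbdev_generic_setup(drm, 32);
        return 0;
}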
View file

@ -22,6 +22,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_flip_work.h>
/**

View file

@ -27,6 +27,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
#include "drm_internal.h"
#include "drm_crtc_internal.h"

View file

@ -37,6 +37,7 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
* ref count of those pages.
*/
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
check_move_unevictable_pages(pvec);
__pagevec_release(pvec);
cond_resched();
}
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
struct address_space *mapping;
struct page *p, **pages;
struct pagevec pvec;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
if (pages == NULL)
return ERR_PTR(-ENOMEM);
mapping_set_unevictable(mapping);
for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
return pages;
fail:
while (i--)
put_page(pages[i]);
mapping_clear_unevictable(mapping);
pagevec_init(&pvec);
while (i--) {
if (!pagevec_add(&pvec, pages[i]))
drm_gem_check_release_pagevec(&pvec);
}
if (pagevec_count(&pvec))
drm_gem_check_release_pagevec(&pvec);
kvfree(pages);
return ERR_CAST(p);
@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed)
{
int i, npages;
struct address_space *mapping;
struct pagevec pvec;
mapping = file_inode(obj->filp)->i_mapping;
mapping_clear_unevictable(mapping);
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
npages = obj->size >> PAGE_SHIFT;
pagevec_init(&pvec);
for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty(pages[i]);
@ -622,8 +649,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
put_page(pages[i]);
if (!pagevec_add(&pvec, pages[i]))
drm_gem_check_release_pagevec(&pvec);
}
if (pagevec_count(&pvec))
drm_gem_check_release_pagevec(&pvec);
kvfree(pages);
}

View file

@ -26,6 +26,8 @@
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 4
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
struct drm_prime_file_private;
struct dma_buf;

View file

@ -218,7 +218,7 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
idr_for_each_entry(leases, entry, object) {
error = 0;
if (!idr_find(&dev->mode_config.crtc_idr, object))
if (!idr_find(&dev->mode_config.object_idr, object))
error = -ENOENT;
else if (!_drm_lease_held_master(lessor, object))
error = -EACCES;
@ -439,7 +439,7 @@ static int fill_object_idr(struct drm_device *dev,
/*
* We're using an IDR to hold the set of leased
* objects, but we don't need to point at the object's
* data structure from the lease as the main crtc_idr
* data structure from the lease as the main object_idr
* will be used to actually find that. Instead, all we
* really want is a 'leased/not-leased' result, for
* which any non-NULL pointer will work fine.
@ -687,7 +687,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
if (lessee->lessor == NULL)
/* owner can use all objects */
object_idr = &lessee->dev->mode_config.crtc_idr;
object_idr = &lessee->dev->mode_config.object_idr;
else
/* lessee can only use allowed object */
object_idr = &lessee->leases;

View file

@ -393,7 +393,8 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
idr_init(&dev->mode_config.object_idr);
idr_init(&dev->mode_config.tile_idr);
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
@ -496,7 +497,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
ida_destroy(&dev->mode_config.connector_ida);
idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.object_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);

View file

@ -38,7 +38,7 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
int ret;
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL,
ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
1, 0, GFP_KERNEL);
if (ret >= 0) {
/*
@ -79,7 +79,7 @@ void drm_mode_object_register(struct drm_device *dev,
struct drm_mode_object *obj)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
idr_replace(&dev->mode_config.object_idr, obj, obj->id);
mutex_unlock(&dev->mode_config.idr_mutex);
}
@ -99,7 +99,7 @@ void drm_mode_object_unregister(struct drm_device *dev,
{
mutex_lock(&dev->mode_config.idr_mutex);
if (object->id) {
idr_remove(&dev->mode_config.crtc_idr, object->id);
idr_remove(&dev->mode_config.object_idr, object->id);
object->id = 0;
}
mutex_unlock(&dev->mode_config.idr_mutex);
@ -131,7 +131,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
struct drm_mode_object *obj = NULL;
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
obj = idr_find(&dev->mode_config.object_idr, id);
if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
obj = NULL;
if (obj && obj->id != id)
@ -459,12 +459,13 @@ static int set_property_atomic(struct drm_mode_object *obj,
struct drm_modeset_acquire_ctx ctx;
int ret;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
drm_modeset_acquire_init(&ctx, 0);
state->acquire_ctx = &ctx;
retry:
if (prop == state->dev->mode_config.dpms_property) {
if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {

View file

@ -71,11 +71,6 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev)
if (!nmode)
return NULL;
if (drm_mode_object_add(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
kfree(nmode);
return NULL;
}
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
@ -92,8 +87,6 @@ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
if (!mode)
return;
drm_mode_object_unregister(dev, &mode->base);
kfree(mode);
}
EXPORT_SYMBOL(drm_mode_destroy);
@ -911,11 +904,9 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
int id = dst->base.id;
struct list_head head = dst->head;
*dst = *src;
dst->base.id = id;
dst->head = head;
}
EXPORT_SYMBOL(drm_mode_copy);

View file

@ -22,6 +22,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_lock.h>
@ -394,6 +395,7 @@ EXPORT_SYMBOL(drm_modeset_unlock);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_private_obj *privobj;
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret;
@ -414,6 +416,12 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev,
return ret;
}
drm_for_each_privobj(privobj, dev) {
ret = drm_modeset_lock(&privobj->lock, ctx);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);

View file

@ -217,9 +217,11 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
}
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
/*
/**
* drm_of_find_panel_or_bridge - return connected panel or bridge device
* @np: device tree node containing encoder output ports
* @port: port in the device tree node
* @endpoint: endpoint in the device tree node
* @panel: pointer to hold returned drm_panel
* @bridge: pointer to hold returned drm_bridge
*

View file

@ -36,6 +36,9 @@ static LIST_HEAD(panel_list);
* The DRM panel helpers allow drivers to register panel objects with a
* central registry and provide functions to retrieve those panels in display
* drivers.
*
* For easy integration into drivers using the &drm_bridge infrastructure please
* take a look at drm_panel_bridge_add() and devm_drm_panel_bridge_add().
*/
/**

View file

@ -220,6 +220,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
format_modifier_count++;
}
if (format_modifier_count)
config->allow_fb_modifiers = true;
plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
sizeof(format_modifiers[0]),

View file

@ -56,6 +56,16 @@
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
struct syncobj_wait_entry {
struct list_head node;
struct task_struct *task;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
};
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait);
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
@ -82,58 +92,33 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
}
EXPORT_SYMBOL(drm_syncobj_find);
static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb,
drm_syncobj_func_t func)
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
cb->func = func;
list_add_tail(&cb->node, &syncobj->cb_list);
}
static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
struct dma_fence **fence,
struct drm_syncobj_cb *cb,
drm_syncobj_func_t func)
{
int ret;
*fence = drm_syncobj_fence_get(syncobj);
if (*fence)
return 1;
if (wait->fence)
return;
spin_lock(&syncobj->lock);
/* We've already tried once to get a fence and failed. Now that we
* have the lock, try one more time just to be sure we don't add a
* callback when a fence has already been set.
*/
if (syncobj->fence) {
*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
lockdep_is_held(&syncobj->lock)));
ret = 1;
} else {
*fence = NULL;
drm_syncobj_add_callback_locked(syncobj, cb, func);
ret = 0;
}
spin_unlock(&syncobj->lock);
return ret;
}
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb,
drm_syncobj_func_t func)
{
spin_lock(&syncobj->lock);
drm_syncobj_add_callback_locked(syncobj, cb, func);
if (syncobj->fence)
wait->fence = dma_fence_get(
rcu_dereference_protected(syncobj->fence, 1));
else
list_add_tail(&wait->node, &syncobj->cb_list);
spin_unlock(&syncobj->lock);
}
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb)
static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
if (!wait->node.next)
return;
spin_lock(&syncobj->lock);
list_del_init(&cb->node);
list_del_init(&wait->node);
spin_unlock(&syncobj->lock);
}
@ -148,7 +133,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
struct drm_syncobj_cb *cur, *tmp;
struct syncobj_wait_entry *cur, *tmp;
if (fence)
dma_fence_get(fence);
@ -162,7 +147,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
if (fence != old_fence) {
list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
list_del_init(&cur->node);
cur->func(syncobj, cur);
syncobj_wait_syncobj_func(syncobj, cur);
}
}
@ -608,13 +593,6 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
&args->handle);
}
struct syncobj_wait_entry {
struct task_struct *task;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
struct drm_syncobj_cb syncobj_cb;
};
static void syncobj_wait_fence_func(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@ -625,11 +603,8 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
}
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb)
struct syncobj_wait_entry *wait)
{
struct syncobj_wait_entry *wait =
container_of(cb, struct syncobj_wait_entry, syncobj_cb);
/* This happens inside the syncobj lock */
wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
lockdep_is_held(&syncobj->lock)));
@ -688,12 +663,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
*/
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
syncobj_wait_syncobj_func);
}
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
do {
@ -742,9 +713,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
cleanup_entries:
for (i = 0; i < count; ++i) {
if (entries[i].syncobj_cb.func)
drm_syncobj_remove_callback(syncobjs[i],
&entries[i].syncobj_cb);
drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
if (entries[i].fence_cb.func)
dma_fence_remove_callback(entries[i].fence,
&entries[i].fence_cb);

View file

@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
write_sequnlock(&vblank->seqlock);
}
static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
return vblank->max_vblank_count ?: dev->max_vblank_count;
}
/*
* "No hw counter" fallback implementation of .get_vblank_counter() hook,
* if there is no usable hardware frame counter available.
*/
static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
{
WARN_ON_ONCE(dev->max_vblank_count != 0);
WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
return 0;
}
@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
int framedur_ns = vblank->framedur_ns;
u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
/*
* Interrupts were disabled prior to this call, so deal with counter
@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
if (dev->max_vblank_count != 0) {
if (max_vblank_count) {
/* trust the hw counter when it's around */
diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
diff = (cur_vblank - vblank->last) & max_vblank_count;
} else if (rc && framedur_ns) {
u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
@ -1204,6 +1212,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);
/**
* drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
* @crtc: CRTC in question
* @max_vblank_count: max hardware vblank counter value
*
* Update the maximum hardware vblank counter value for @crtc
* at runtime. Useful for hardware where the operation of the
* hardware vblank counter depends on the currently active
* display configuration.
*
* For example, if the hardware vblank counter does not work
* when a specific connector is active the maximum can be set
* to zero. And when that specific connector isn't active the
* maximum can again be set to the appropriate non-zero value.
*
* If used, must be called before drm_vblank_on().
*/
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
WARN_ON(dev->max_vblank_count);
WARN_ON(!READ_ONCE(vblank->inmodeset));
vblank->max_vblank_count = max_vblank_count;
}
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
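A hedged sketch of a CRTC enable path that retunes the counter limit for the active configuration, as the kernel-doc above requires this to happen before drm_crtc_vblank_on(); foo_crtc_enable() and hw_counter_works are hypothetical:

static void foo_crtc_enable(struct drm_crtc *crtc, bool hw_counter_works)
{
        /* vblank is still off here, so the limit may be reprogrammed. */
        drm_crtc_set_max_vblank_count(crtc,
                                      hw_counter_works ? 0xffffff : 0);

        drm_crtc_vblank_on(crtc);
}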
/**
* drm_crtc_vblank_on - enable vblank events on a CRTC
* @crtc: CRTC in question

View file

@ -449,7 +449,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
const char *type, struct seq_file *m)
{
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
seq_printf(m, "\t%9s: %s %s seq %u\n",
seq_printf(m, "\t%9s: %s %s seq %llu\n",
type,
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),

View file

@ -246,8 +246,8 @@ static void mic_post_disable(struct drm_bridge *bridge)
}
static void mic_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct exynos_mic *mic = bridge->driver_private;

View file

@ -819,7 +819,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
return;
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi, m, false);
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
&hdata->connector, m);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {

View file

@ -22,6 +22,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
@ -33,32 +34,15 @@ static struct kirin_dc_ops *dc_ops;
static int kirin_drm_kms_cleanup(struct drm_device *dev)
{
struct kirin_drm_private *priv = dev->dev_private;
if (priv->fbdev) {
drm_fbdev_cma_fini(priv->fbdev);
priv->fbdev = NULL;
}
drm_kms_helper_poll_fini(dev);
dc_ops->cleanup(to_platform_device(dev->dev));
drm_mode_config_cleanup(dev);
devm_kfree(dev->dev, priv);
dev->dev_private = NULL;
return 0;
}
static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
{
struct kirin_drm_private *priv = dev->dev_private;
drm_fbdev_cma_hotplug_event(priv->fbdev);
}
static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.output_poll_changed = kirin_fbdev_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@ -76,14 +60,8 @@ static void kirin_drm_mode_config_init(struct drm_device *dev)
static int kirin_drm_kms_init(struct drm_device *dev)
{
struct kirin_drm_private *priv;
int ret;
priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev->dev_private = priv;
dev_set_drvdata(dev->dev, dev);
/* dev->mode_config initialization */
@ -117,26 +95,14 @@ static int kirin_drm_kms_init(struct drm_device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
priv->fbdev = drm_fbdev_cma_init(dev, 32,
dev->mode_config.num_connector);
if (IS_ERR(priv->fbdev)) {
DRM_ERROR("failed to initialize fbdev.\n");
ret = PTR_ERR(priv->fbdev);
goto err_cleanup_poll;
}
return 0;
err_cleanup_poll:
drm_kms_helper_poll_fini(dev);
err_unbind_all:
component_unbind_all(dev->dev, dev);
err_dc_cleanup:
dc_ops->cleanup(to_platform_device(dev->dev));
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
devm_kfree(dev->dev, priv);
dev->dev_private = NULL;
return ret;
}
@ -199,6 +165,8 @@ static int kirin_drm_bind(struct device *dev)
if (ret)
goto err_kms_cleanup;
drm_fbdev_generic_setup(drm_dev, 32);
return 0;
err_kms_cleanup:

View file

@ -19,10 +19,6 @@ struct kirin_dc_ops {
void (*cleanup)(struct platform_device *pdev);
};
struct kirin_drm_private {
struct drm_fbdev_cma *fbdev;
};
extern const struct kirin_dc_ops ade_dc_ops;
#endif /* __KIRIN_DRM_DRV_H__ */

View file

@ -359,10 +359,10 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
if (modes_changed) {
drm_helper_probe_single_connector_modes(connector, 0, 0);
/* Disable the crtc to ensure a full modeset is
* performed whenever it's turned on again. */
if (crtc)
drm_crtc_force_disable(crtc);
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->primary->fb);
}
return 0;

View file

@ -845,11 +845,12 @@ static int tda998x_write_aif(struct tda998x_priv *priv,
}
static void
tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
tda998x_write_avi(struct tda998x_priv *priv, const struct drm_display_mode *mode)
{
union hdmi_infoframe frame;
drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
&priv->connector, mode);
frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
@ -1122,7 +1123,6 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs tda998x_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
@ -1339,8 +1339,8 @@ static void tda998x_bridge_disable(struct drm_bridge *bridge)
}
static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
unsigned long tmds_clock;

View file

@ -2797,14 +2797,7 @@ static void intel_seq_print_mode(struct seq_file *m, int tabs,
for (i = 0; i < tabs; i++)
seq_putc(m, '\t');
seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
mode->base.id, mode->name,
mode->vrefresh, mode->clock,
mode->hdisplay, mode->hsync_start,
mode->hsync_end, mode->htotal,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
}
static void intel_encoder_info(struct seq_file *m,

View file

@ -645,7 +645,7 @@ last_request_on_engine(struct i915_timeline *timeline,
rq = i915_gem_active_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
if (rq && rq->engine == engine) {
GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
timeline->name, engine->name,
rq->fence.context, rq->fence.seqno);
GEM_BUG_ON(rq->timeline != timeline);
@ -682,14 +682,14 @@ static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
* switch-to-kernel-context?
*/
if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
GEM_TRACE("%s needs barrier for %llx:%d\n",
GEM_TRACE("%s needs barrier for %llx:%lld\n",
ring->timeline->name,
rq->fence.context,
rq->fence.seqno);
return false;
}
GEM_TRACE("%s has barrier after %llx:%d\n",
GEM_TRACE("%s has barrier after %llx:%lld\n",
ring->timeline->name,
rq->fence.context,
rq->fence.seqno);
@ -745,7 +745,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
if (prev->gem_context == i915->kernel_context)
continue;
GEM_TRACE("add barrier on %s for %llx:%d\n",
GEM_TRACE("add barrier on %s for %llx:%lld\n",
engine->name,
prev->fence.context,
prev->fence.seqno);

View file

@ -182,7 +182,7 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
rq->global_seqno,
@ -244,7 +244,7 @@ static void i915_request_retire(struct i915_request *request)
{
struct i915_gem_active *active, *next;
GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
request->global_seqno,
@ -307,7 +307,7 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
rq->global_seqno,
@ -355,7 +355,7 @@ void __i915_request_submit(struct i915_request *request)
struct intel_engine_cs *engine = request->engine;
u32 seqno;
GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
engine->timeline.seqno + 1,
@ -405,7 +405,7 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
request->global_seqno,
@ -874,7 +874,7 @@ void i915_request_add(struct i915_request *request)
struct i915_request *prev;
u32 *cs;
GEM_TRACE("%s fence %llx:%d\n",
GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
lockdep_assert_held(&request->i915->drm.struct_mutex);

View file

@ -390,7 +390,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
if (!fence)
return;
pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
cb->dma->ops->get_driver_name(cb->dma),
cb->dma->ops->get_timeline_name(cb->dma),
cb->dma->seqno,

View file

@ -94,6 +94,10 @@ void intel_connector_destroy(struct drm_connector *connector)
intel_panel_fini(&intel_connector->panel);
drm_connector_cleanup(connector);
if (intel_connector->port)
drm_dp_mst_put_port_malloc(intel_connector->port);
kfree(connector);
}

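The get/put pair added here keeps the MST port's memory allocation alive for as long as the connector refers to it; a hedged sketch of the same pairing in a generic driver (struct foo_connector and the foo_* functions are hypothetical):

struct foo_connector {
        struct drm_connector base;
        struct drm_dp_mst_port *port;
};

static void foo_bind_mst_port(struct foo_connector *conn,
                              struct drm_dp_mst_port *port)
{
        conn->port = port;
        drm_dp_mst_get_port_malloc(port);       /* keep the port memory alive */
}

static void foo_connector_destroy(struct drm_connector *connector)
{
        struct foo_connector *conn =
                container_of(connector, struct foo_connector, base);

        drm_connector_cleanup(connector);
        if (conn->port)
                drm_dp_mst_put_port_malloc(conn->port); /* drop our reference */
        kfree(conn);
}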
View file

@ -12739,6 +12739,10 @@ static int intel_atomic_check(struct drm_device *dev,
"[modeset]" : "[fastset]");
}
ret = drm_dp_mst_atomic_check(state);
if (ret)
return ret;
if (any_ms) {
ret = intel_modeset_checks(state);

View file
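Drivers that manage MST payloads can make the same check part of their &drm_mode_config_funcs.atomic_check hook; a hedged sketch with a hypothetical foo_atomic_check():

static int foo_atomic_check(struct drm_device *dev,
                            struct drm_atomic_state *state)
{
        int ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        /* Validate VCPI allocations across the whole MST topology. */
        return drm_dp_mst_atomic_check(state);
}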

@ -40,8 +40,12 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_connector *connector = conn_state->connector;
void *port = to_intel_connector(connector)->port;
struct drm_atomic_state *state = pipe_config->base.state;
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_crtc_state *old_crtc_state =
drm_atomic_get_old_crtc_state(state, crtc);
int bpp;
int lane_count, slots = 0;
int lane_count, slots =
to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
@ -106,35 +110,39 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
return 0;
}
static int intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_conn_state)
static int
intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_conn_state)
{
struct drm_atomic_state *state = new_conn_state->state;
struct drm_connector_state *old_conn_state;
struct drm_crtc *old_crtc;
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, connector);
struct intel_connector *intel_connector =
to_intel_connector(connector);
struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
int slots, ret = 0;
struct drm_dp_mst_topology_mgr *mgr;
int ret = 0;
old_conn_state = drm_atomic_get_old_connector_state(state, connector);
old_crtc = old_conn_state->crtc;
if (!old_crtc)
return ret;
if (!old_conn_state->crtc)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
struct drm_dp_mst_topology_mgr *mgr;
struct drm_encoder *old_encoder;
/* We only want to free VCPI if this state disables the CRTC on this
* connector
*/
if (new_crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
old_encoder = old_conn_state->best_encoder;
mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
if (ret)
DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
else
to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
if (!crtc_state ||
!drm_atomic_crtc_needs_modeset(crtc_state) ||
crtc_state->enable)
return 0;
}
mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr;
ret = drm_dp_atomic_release_vcpi_slots(state, mgr,
intel_connector->port);
return ret;
}
@ -456,6 +464,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
@ -516,20 +525,10 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_put(connector);
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
drm_kms_helper_hotplug_event(dev);
}
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
static struct intel_dp_mst_encoder *
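
With the atomic VCPI helpers now tracking allocations per MST port, the release path above passes the port itself instead of a slot count cached in the CRTC state, and the matching allocation is made against the same port from compute_config. A hedged sketch of that pairing, assuming the four-argument drm_dp_atomic_find_vcpi_slots() of this era; both my_*() wrappers are hypothetical:

#include <drm/drm_dp_mst_helper.h>

static int my_mst_reserve_slots(struct drm_atomic_state *state,
				struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port, int pbn)
{
	/* Returns the number of slots reserved for this port, or -errno. */
	return drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
}

static int my_mst_release_slots(struct drm_atomic_state *state,
				struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	/* The slot count comes from the port's tracked allocation. */
	return drm_dp_atomic_release_vcpi_slots(state, mgr, port);
}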


@ -1079,7 +1079,6 @@ struct intel_hdmi {
} dp_dual_mode;
bool has_hdmi_sink;
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
struct cec_notifier *cec_notifier;
};


@ -1226,7 +1226,7 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n",
prefix,
rq->global_seqno,
i915_request_completed(rq) ? "!" : "",


@ -478,18 +478,14 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
struct drm_connector *connector = &intel_hdmi->attached_connector->base;
bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode,
is_hdmi2_sink);
conn_state->connector,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@ -502,12 +498,12 @@ static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
conn_state->connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
intel_hdmi->rgb_quant_range_selectable,
is_hdmi2_sink);
HDMI_QUANTIZATION_RANGE_FULL);
drm_hdmi_avi_infoframe_content_type(&frame.avi,
conn_state);
@ -1836,7 +1832,6 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
@ -1921,9 +1916,6 @@ intel_hdmi_set_edid(struct drm_connector *connector)
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
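
Both AVI infoframe helpers now take the connector, which is where the HDMI 2.0 and quantization-range capabilities live, so intel_hdmi no longer needs to cache rgb_quant_range_selectable or compute is_hdmi2_sink itself. A sketch of the updated call sequence, using only the helpers visible in this diff; my_fill_avi_infoframe() is a hypothetical wrapper:

#include <drm/drm_edid.h>
#include <linux/hdmi.h>

static int my_fill_avi_infoframe(struct drm_connector *connector,
				 const struct drm_display_mode *adjusted_mode,
				 bool limited_range, u8 *buf, size_t len)
{
	union hdmi_infoframe frame;
	int ret;

	/* Sink capabilities are read from connector->display_info internally. */
	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, connector,
						       adjusted_mode);
	if (ret < 0)
		return ret;

	drm_hdmi_avi_infoframe_quant_range(&frame.avi, connector, adjusted_mode,
					   limited_range ?
					   HDMI_QUANTIZATION_RANGE_LIMITED :
					   HDMI_QUANTIZATION_RANGE_FULL);

	ret = hdmi_infoframe_pack(&frame, buf, len);
	return ret < 0 ? ret : 0;
}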


@ -435,7 +435,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
rq->global_seqno,
@ -728,7 +728,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
rq->global_seqno,
@ -956,7 +956,7 @@ static void process_csb(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
engine->name,
port->context_id, count,
rq ? rq->global_seqno : 0,


@ -462,10 +462,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
u8 buf[VIDEO_DIP_DATA_SIZE];
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_lspcon *lspcon = &dig_port->lspcon;
struct intel_dp *intel_dp = &dig_port->dp;
struct drm_connector *connector = &intel_dp->attached_connector->base;
const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
if (!lspcon->active) {
DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
@ -473,7 +471,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
mode, is_hdmi2_sink);
conn_state->connector,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@ -488,11 +487,12 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
}
drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
conn_state->connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
false, is_hdmi2_sink);
HDMI_QUANTIZATION_RANGE_FULL);
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {


@ -102,7 +102,6 @@ struct intel_sdvo {
bool has_hdmi_monitor;
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
@ -980,29 +979,30 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *pipe_config)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
int ret;
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
&pipe_config->base.adjusted_mode,
false);
conn_state->connector,
adjusted_mode);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
}
if (intel_sdvo->rgb_quant_range_selectable) {
if (pipe_config->limited_color_range)
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_LIMITED;
else
frame.avi.quantization_range =
HDMI_QUANTIZATION_RANGE_FULL;
}
drm_hdmi_avi_infoframe_quant_range(&frame.avi,
conn_state->connector,
adjusted_mode,
pipe_config->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
if (len < 0)
@ -1315,7 +1315,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
intel_sdvo_set_avi_infoframe(intel_sdvo,
crtc_state, conn_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@ -1801,8 +1802,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo_connector->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
intel_sdvo->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@ -1851,7 +1850,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;


@ -475,7 +475,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
@ -576,7 +576,7 @@ static int active_request_put(struct i915_request *rq)
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
rq->engine->name,
rq->fence.context,
rq->fence.seqno,
@ -753,7 +753,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
@ -952,7 +952,7 @@ static int igt_reset_wait(void *arg)
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
@ -1131,7 +1131,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
@ -1326,7 +1326,7 @@ static int igt_reset_queue(void *arg)
if (!wait_until_running(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s(%s): Failed to start request %x, at %x\n",
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
prev->fence.seqno, hws_seqno(&h, prev));
intel_engine_dump(engine, &p,
@ -1437,7 +1437,7 @@ static int igt_handle_error(void *arg)
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
pr_err("%s: Failed to start request %x, at %x\n",
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);


@ -981,7 +981,8 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
u8 buffer[17];
ssize_t err;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->conn, mode);
if (err < 0) {
dev_err(hdmi->dev,
"Failed to get AVI infoframe from mode: %zd\n", err);
@ -1370,8 +1371,8 @@ static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
}
static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
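
The constified mode pointers match the drm_bridge_funcs ->mode_set prototype used here: the callback may read the modes but must not modify them, so a bridge that wants to keep the timings around copies them rather than stashing the pointer. A minimal, hypothetical sketch (my_bridge_ctx is not a real mediatek structure):

#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>

struct my_bridge_ctx {				/* hypothetical private state */
	struct drm_bridge bridge;
	struct drm_display_mode mode;
};

static void my_bridge_mode_set(struct drm_bridge *bridge,
			       const struct drm_display_mode *mode,
			       const struct drm_display_mode *adjusted_mode)
{
	struct my_bridge_ctx *ctx =
		container_of(bridge, struct my_bridge_ctx, bridge);

	/* The modes are read-only now; keep a private copy of the timings. */
	drm_mode_copy(&ctx->mode, adjusted_mode);
}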


@ -152,6 +152,23 @@ static void meson_vpu_init(struct meson_drm *priv)
writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
}
static void meson_remove_framebuffers(void)
{
struct apertures_struct *ap;
ap = alloc_apertures(1);
if (!ap)
return;
/* The framebuffer can be located anywhere in RAM */
ap->ranges[0].base = 0;
ap->ranges[0].size = ~0;
drm_fb_helper_remove_conflicting_framebuffers(ap, "meson-drm-fb",
false);
kfree(ap);
}
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@ -262,6 +279,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
/* Remove early framebuffers (ie. simplefb) */
meson_remove_framebuffers();
drm_mode_config_init(drm);
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;

Some files were not shown because too many files have changed in this diff.