Merge tag 'drm-misc-next-2022-04-07' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.19:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - atomic: Add atomic_print_state to private objects
  - edid: Constify and rework the EDID parsing API
  - dma-buf: Add dma_resv_replace_fences, dma_resv_get_singleton, make
    dma_resv_excl_fence private
  - format: Support monochrome formats
  - fbdev: fixes for cfb_imageblit and sys_imageblit, pagelist
    corruption fix
  - selftests: several small fixes
  - ttm: Rework bulk move handling

Driver Changes:
  - Switch all relevant drivers to drm_mode_copy or drm_mode_duplicate (see
    the sketch below)
  - bridge: conversions to devm_drm_of_get_bridge and panel_bridge,
    autosuspend for analogix_dp, audio support for it66121, DSI to DPI
    support for tc358767, PLL fixes and I2C support for icn6211
  - bridge_connector: Enable HPD if supported
  - etnaviv: fencing improvements
  - gma500: GEM and GTT improvements, connector handling fixes
  - komeda: switch to plane reset helper
  - mediatek: MIPI DSI improvements
  - omapdrm: GEM improvements
  - panel: DT binding fixes for st7735r, a few fixes for ssd130x, new
    panels: ltk035c5444t, B133UAN01, NV3052C
  - qxl: Allow running on arm64
  - sysfb: Kconfig rework, support for VESA graphic mode selection
  - vc4: Add a tracepoint for CL submissions, HDMI YUV output,
    HDMI and clock improvements
  - virtio: Remove the restriction of non-zero blob_flags
  - vmwgfx: support for CursorMob and CursorBypass 4, various
    improvements and small fixes

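A minimal sketch of the drm_mode_copy()/drm_mode_duplicate() conversion named
in the first Driver Changes entry (the connector callback and the priv fields
are illustrative, not taken from any one converted driver): both helpers copy
the mode data while preserving the destination mode's list head, which a plain
struct assignment would corrupt.

  #include <drm/drm_modes.h>
  #include <drm/drm_probe_helper.h>

  static int panel_connector_get_modes(struct drm_connector *connector)
  {
          struct panel_priv *priv = connector_to_priv(connector); /* hypothetical */
          struct drm_display_mode *mode;

          /* in-place copy into driver state, list head left untouched */
          drm_mode_copy(&priv->current_mode, &priv->fixed_mode);

          /* tracked duplicate handed to the probe helpers */
          mode = drm_mode_duplicate(connector->dev, &priv->fixed_mode);
          if (!mode)
                  return 0;

          drm_mode_probed_add(connector, mode);
          return 1;
  }
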
[airlied: fixup conflict with newvision panel callbacks]
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085940.pnflvjojs4qw4b77@houat
Committed by Dave Airlie on 2022-04-12 17:39:45 +10:00, commit b85ffe47c4.
195 changed files with 5795 additions and 2790 deletions.


@ -38,6 +38,9 @@ properties:
interrupts:
maxItems: 1
"#sound-dai-cells":
const: 0
ports:
$ref: /schemas/graph.yaml#/properties/ports


@ -53,16 +53,32 @@ properties:
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
description: |
DSI input port. The remote endpoint phandle should be a
reference to a valid DSI output endpoint node
properties:
endpoint:
$ref: /schemas/media/video-interfaces.yaml#
unevaluatedProperties: false
properties:
data-lanes:
description: array of physical DSI data lane indexes.
minItems: 1
items:
- const: 1
- const: 2
- const: 3
- const: 4
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: |
DPI input port. The remote endpoint phandle should be a
reference to a valid DPI output endpoint node
DPI input/output port. The remote endpoint phandle should be a
reference to a valid DPI output or input endpoint node.
port@2:
$ref: /schemas/graph.yaml#/properties/port


@ -0,0 +1,59 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/leadtek,ltk035c5444t.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Leadtek ltk035c5444t 3.5" (640x480 pixels) 24-bit IPS LCD panel
maintainers:
- Paul Cercueil <paul@crapouillou.net>
- Christophe Branchereau <cbranchereau@gmail.com>
allOf:
- $ref: panel-common.yaml#
- $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
const: leadtek,ltk035c5444t
backlight: true
port: true
power-supply: true
reg: true
reset-gpios: true
required:
- compatible
- power-supply
- reset-gpios
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "leadtek,ltk035c5444t";
reg = <0>;
spi-3wire;
spi-max-frequency = <3125000>;
reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
backlight = <&backlight>;
power-supply = <&vcc>;
port {
panel_input: endpoint {
remote-endpoint = <&panel_output>;
};
};
};
};


@ -32,15 +32,13 @@ properties:
- okaya,rh128128t
- const: sitronix,st7715r
spi-max-frequency:
maximum: 32000000
dc-gpios:
maxItems: 1
description: Display data/command selection (D/CX)
backlight: true
reg: true
spi-max-frequency: true
reset-gpios: true
rotation: true
@ -48,7 +46,6 @@ required:
- compatible
- reg
- dc-gpios
- reset-gpios
additionalProperties: false
@ -72,6 +69,7 @@ examples:
dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
rotation = <270>;
backlight = <&backlight>;
};
};


@ -466,6 +466,15 @@ DRM MM Range Allocator Function References
.. kernel-doc:: drivers/gpu/drm/drm_mm.c
:export:
DRM Buddy Allocator
===================
DRM Buddy Function References
-----------------------------
.. kernel-doc:: drivers/gpu/drm/drm_buddy.c
:export:
DRM Cache Handling and Fast WC memcpy()
=======================================


@ -148,7 +148,9 @@ clients together with the legacy drmAuth authentication procedure.
If a driver advertises render node support, DRM core will create a
separate render node called renderD<num>. There will be one render node
per device. No ioctls except PRIME-related ioctls will be allowed on
this node. Especially GEM_OPEN will be explicitly prohibited. Render
this node. Especially GEM_OPEN will be explicitly prohibited. For a
complete list of driver-independent ioctls that can be used on render
nodes, see the ioctls marked DRM_RENDER_ALLOW in drm_ioctl.c Render
nodes are designed to avoid the buffer-leaks, which occur if clients
guess the flink names or mmap offsets on the legacy interface.
Additionally to this basic interface, drivers must mark their

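A user-space sketch of the behaviour described above (illustrative: the render
node number varies per system, and error handling is trimmed):

  #include <fcntl.h>
  #include <xf86drm.h>

  static int try_render_node(void)
  {
          int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
          struct drm_gem_open args = { .name = 1 };

          if (fd < 0)
                  return fd;

          /* PRIME ioctls (drmPrimeFDToHandle() etc.) are DRM_RENDER_ALLOW and
           * work here without any drmAuth handshake. */

          /* GEM_OPEN is not DRM_RENDER_ALLOW, so guessing flink names is
           * rejected on render nodes (the ioctl fails with EACCES). */
          return drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &args);
  }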

@ -6308,6 +6308,11 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
F: drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
DRM DRIVER FOR PARADE PS8640 BRIDGE CHIP
R: Douglas Anderson <dianders@chromium.org>
F: Documentation/devicetree/bindings/display/bridge/ps8640.yaml
F: drivers/gpu/drm/bridge/parade-ps8640.c
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
@ -6421,6 +6426,11 @@ DRM DRIVER FOR TDFX VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/tdfx/
DRM DRIVER FOR TI SN65DSI86 BRIDGE CHIP
R: Douglas Anderson <dianders@chromium.org>
F: Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
F: drivers/gpu/drm/bridge/ti-sn65dsi86.c
DRM DRIVER FOR TPO TPG110 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
@ -6540,6 +6550,7 @@ R: Jonas Karlman <jonas@kwiboo.se>
R: Jernej Skrabec <jernej.skrabec@gmail.com>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/bridge/
F: drivers/gpu/drm/bridge/
DRM DRIVERS FOR EXYNOS


@ -940,6 +940,12 @@ config GART_IOMMU
If unsure, say Y.
config BOOT_VESA_SUPPORT
bool
help
If true, at least one selected framebuffer driver can take advantage
of VESA video modes set at an early boot stage via the vga= parameter.
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL

View file

@ -83,7 +83,7 @@ static int vesa_probe(void)
(vminfo.memory_layout == 4 ||
vminfo.memory_layout == 6) &&
vminfo.memory_planes == 1) {
#ifdef CONFIG_FB_BOOT_VESA_SUPPORT
#ifdef CONFIG_BOOT_VESA_SUPPORT
/* Graphics mode, color, linear frame buffer
supported. Only register the mode if
if framebuffer is configured, however,
@ -121,7 +121,7 @@ static int vesa_set_mode(struct mode_info *mode)
if ((vminfo.mode_attr & 0x15) == 0x05) {
/* It's a supported text mode */
is_graphic = 0;
#ifdef CONFIG_FB_BOOT_VESA_SUPPORT
#ifdef CONFIG_BOOT_VESA_SUPPORT
} else if ((vminfo.mode_attr & 0x99) == 0x99) {
/* It's a graphics mode with linear frame buffer */
is_graphic = 1;


@ -443,7 +443,7 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
* as a file descriptor by calling dma_buf_fd().
*
* 2. Userspace passes this file-descriptors to all drivers it wants this buffer
* to share with: First the filedescriptor is converted to a &dma_buf using
* to share with: First the file descriptor is converted to a &dma_buf using
* dma_buf_get(). Then the buffer is attached to the device using
* dma_buf_attach().
*

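A condensed importer-side sketch of the flow described in the comment above
(fd is the descriptor received from the exporter, dev the importing device;
the function name and error unwinding are illustrative):

  #include <linux/dma-buf.h>
  #include <linux/dma-direction.h>

  static struct sg_table *import_buffer(int fd, struct device *dev,
                                        struct dma_buf_attachment **out_attach)
  {
          struct dma_buf *dmabuf = dma_buf_get(fd);      /* fd -> &dma_buf */
          struct dma_buf_attachment *attach;

          if (IS_ERR(dmabuf))
                  return ERR_CAST(dmabuf);

          attach = dma_buf_attach(dmabuf, dev);          /* bind to this device */
          if (IS_ERR(attach)) {
                  dma_buf_put(dmabuf);
                  return ERR_CAST(attach);
          }

          *out_attach = attach;
          return dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
  }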

@ -34,6 +34,7 @@
*/
#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
@ -56,6 +57,12 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
struct dma_resv_list {
struct rcu_head rcu;
u32 shared_count, shared_max;
struct dma_fence __rcu *shared[];
};
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
@ -133,8 +140,19 @@ void dma_resv_fini(struct dma_resv *obj)
}
EXPORT_SYMBOL(dma_resv_fini);
static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
{
return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}
static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}
/**
* dma_resv_reserve_shared - Reserve space to add shared fences to
* dma_resv_reserve_fences - Reserve space to add shared fences to
* a dma_resv.
* @obj: reservation object
* @num_fences: number of fences we want to add
@ -149,7 +167,7 @@ EXPORT_SYMBOL(dma_resv_fini);
* RETURNS
* Zero for success, or -errno
*/
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
struct dma_resv_list *old, *new;
unsigned int i, j, k, max;
@ -212,7 +230,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
EXPORT_SYMBOL(dma_resv_reserve_fences);
#ifdef CONFIG_DEBUG_MUTEXES
/**
@ -220,7 +238,7 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
* @obj: the dma_resv object to reset
*
* Reset the number of pre-reserved shared slots to test that drivers do
* correct slot allocation using dma_resv_reserve_shared(). See also
* correct slot allocation using dma_resv_reserve_fences(). See also
* &dma_resv_list.shared_max.
*/
void dma_resv_reset_shared_max(struct dma_resv *obj)
@ -242,7 +260,7 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
* @fence: the shared fence to add
*
* Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
* dma_resv_reserve_shared() has been called.
* dma_resv_reserve_fences() has been called.
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
@ -289,41 +307,72 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* dma_resv_replace_fences - replace fences in the dma_resv obj
* @obj: the reservation object
* @context: the context of the fences to replace
* @replacement: the new fence to use instead
*
* Replace fences with a specified context with a new fence. Only valid if the
* operation represented by the original fence has no longer access to the
* resources represented by the dma_resv object when the new fence completes.
*
* And example for using this is replacing a preemption fence with a page table
* update fence which makes the resource inaccessible.
*/
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
struct dma_fence *replacement)
{
struct dma_resv_list *list;
struct dma_fence *old;
unsigned int i;
dma_resv_assert_held(obj);
write_seqcount_begin(&obj->seq);
old = dma_resv_excl_fence(obj);
if (old->context == context) {
RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
dma_fence_put(old);
}
list = dma_resv_shared_list(obj);
for (i = 0; list && i < list->shared_count; ++i) {
old = rcu_dereference_protected(list->shared[i],
dma_resv_held(obj));
if (old->context != context)
continue;
rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
dma_fence_put(old);
}
write_seqcount_end(&obj->seq);
}
EXPORT_SYMBOL(dma_resv_replace_fences);
/**
* dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
* @fence: the exclusive fence to add
*
* Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
* Note that this function replaces all fences attached to @obj, see also
* &dma_resv.fence_excl for a discussion of the semantics.
* See also &dma_resv.fence_excl for a discussion of the semantics.
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
struct dma_fence *old_fence = dma_resv_excl_fence(obj);
struct dma_resv_list *old;
u32 i = 0;
dma_resv_assert_held(obj);
old = dma_resv_shared_list(obj);
if (old)
i = old->shared_count;
dma_fence_get(fence);
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(obj->fence_excl, fence);
if (old)
old->shared_count = 0;
write_seqcount_end(&obj->seq);
/* inplace update, no shared fences */
while (i--)
dma_fence_put(rcu_dereference_protected(old->shared[i],
dma_resv_held(obj)));
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
@ -594,6 +643,59 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
* dma_resv_get_singleton - Get a single fence for all the fences
* @obj: the reservation object
* @write: true if we should return all fences
* @fence: the resulting fence
*
* Get a single fence representing all the fences inside the resv object.
* Returns either 0 for success or -ENOMEM.
*
* Warning: This can't be used like this when adding the fence back to the resv
* object since that can lead to stack corruption when finalizing the
* dma_fence_array.
*
* Returns 0 on success and negative error values on failure.
*/
int dma_resv_get_singleton(struct dma_resv *obj, bool write,
struct dma_fence **fence)
{
struct dma_fence_array *array;
struct dma_fence **fences;
unsigned count;
int r;
r = dma_resv_get_fences(obj, write, &count, &fences);
if (r)
return r;
if (count == 0) {
*fence = NULL;
return 0;
}
if (count == 1) {
*fence = fences[0];
kfree(fences);
return 0;
}
array = dma_fence_array_create(count, fences,
dma_fence_context_alloc(1),
1, false);
if (!array) {
while (count--)
dma_fence_put(fences[count]);
kfree(fences);
return -ENOMEM;
}
*fence = &array->base;
return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
/**
* dma_resv_wait_timeout - Wait on reservation's objects
* shared and/or exclusive fences.

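Two short usage sketches for the helpers added to dma-resv above, mirroring how
the amdgpu changes later in this series use them (obj must be locked with
dma_resv_lock(); the wrapper function names are illustrative):

  #include <linux/dma-fence.h>
  #include <linux/dma-resv.h>

  /* retire every fence belonging to one context, as the amdkfd eviction-fence
   * removal below does, using an already-signaled stub fence */
  static void retire_context_fences(struct dma_resv *obj, u64 context)
  {
          struct dma_fence *stub = dma_fence_get_stub();

          dma_resv_replace_fences(obj, context, stub);
          dma_fence_put(stub);
  }

  /* collapse all fences in the object into one fence to keep around, as the
   * amdgpu PASID code below does before freeing a PASID */
  static int collapse_fences(struct dma_resv *obj, struct dma_fence **out)
  {
          return dma_resv_get_singleton(obj, true, out);
  }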

@ -75,18 +75,17 @@ static int test_signaling(void *arg, bool shared)
goto err_free;
}
if (shared) {
r = dma_resv_reserve_shared(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
goto err_unlock;
}
dma_resv_add_shared_fence(&resv, f);
} else {
dma_resv_add_excl_fence(&resv, f);
r = dma_resv_reserve_fences(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
goto err_unlock;
}
if (shared)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);
if (dma_resv_test_signaled(&resv, shared)) {
pr_err("Resv unexpectedly signaled\n");
r = -EINVAL;
@ -134,18 +133,17 @@ static int test_for_each(void *arg, bool shared)
goto err_free;
}
if (shared) {
r = dma_resv_reserve_shared(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
goto err_unlock;
}
dma_resv_add_shared_fence(&resv, f);
} else {
dma_resv_add_excl_fence(&resv, f);
r = dma_resv_reserve_fences(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
goto err_unlock;
}
if (shared)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);
r = -ENOENT;
dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
if (!r) {
@ -206,18 +204,17 @@ static int test_for_each_unlocked(void *arg, bool shared)
goto err_free;
}
if (shared) {
r = dma_resv_reserve_shared(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
dma_resv_unlock(&resv);
goto err_free;
}
dma_resv_add_shared_fence(&resv, f);
} else {
dma_resv_add_excl_fence(&resv, f);
r = dma_resv_reserve_fences(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
dma_resv_unlock(&resv);
goto err_free;
}
if (shared)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);
dma_resv_unlock(&resv);
r = -ENOENT;
@ -290,18 +287,17 @@ static int test_get_fences(void *arg, bool shared)
goto err_resv;
}
if (shared) {
r = dma_resv_reserve_shared(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
dma_resv_unlock(&resv);
goto err_resv;
}
dma_resv_add_shared_fence(&resv, f);
} else {
dma_resv_add_excl_fence(&resv, f);
r = dma_resv_reserve_fences(&resv, 1);
if (r) {
pr_err("Resv shared slot allocation failed\n");
dma_resv_unlock(&resv);
goto err_resv;
}
if (shared)
dma_resv_add_shared_fence(&resv, f);
else
dma_resv_add_excl_fence(&resv, f);
dma_resv_unlock(&resv);
r = dma_resv_get_fences(&resv, shared, &i, &fences);


@ -219,12 +219,12 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
config SYSFB
bool
default y
depends on X86 || EFI
select BOOT_VESA_SUPPORT
config SYSFB_SIMPLEFB
bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
depends on SYSFB
depends on X86 || EFI
select SYSFB
help
Firmwares often provide initial graphics framebuffers so the BIOS,
bootloader or kernel can show basic video-output during boot for


@ -253,53 +253,18 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence *ef)
{
struct dma_resv *resv = bo->tbo.base.resv;
struct dma_resv_list *old, *new;
unsigned int i, j, k;
struct dma_fence *replacement;
if (!ef)
return -EINVAL;
old = dma_resv_shared_list(resv);
if (!old)
return 0;
new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
if (!new)
return -ENOMEM;
/* Go through all the shared fences in the resevation object and sort
* the interesting ones to the end of the list.
/* TODO: Instead of block before we should use the fence of the page
* table update and TLB flush here directly.
*/
for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
f = rcu_dereference_protected(old->shared[i],
dma_resv_held(resv));
if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
else
RCU_INIT_POINTER(new->shared[k++], f);
}
new->shared_max = old->shared_max;
new->shared_count = k;
/* Install the new fence list, seqcount provides the barriers */
write_seqcount_begin(&resv->seq);
RCU_INIT_POINTER(resv->fence, new);
write_seqcount_end(&resv->seq);
/* Drop the references to the removed fences or move them to ef_list */
for (i = j; i < old->shared_count; ++i) {
struct dma_fence *f;
f = rcu_dereference_protected(new->shared[i],
dma_resv_held(resv));
dma_fence_put(f);
}
kfree_rcu(old, rcu);
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
replacement);
dma_fence_put(replacement);
return 0;
}
@ -1268,7 +1233,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.bo,
@ -2606,7 +2571,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
* Add process eviction fence to bo so they can
* evict each other.
*/
ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);


@ -1275,18 +1275,23 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
struct dma_resv *resv = e->tv.bo->base.resv;
struct dma_fence_chain *chain = e->chain;
struct dma_resv_iter cursor;
struct dma_fence *fence;
if (!chain)
continue;
/*
* Work around dma_resv shortcomings by wrapping up the
* submission in a dma_fence_chain and add it as exclusive
* Temporary workaround dma_resv shortcommings by wrapping up
* the submission in a dma_fence_chain and add it as exclusive
* fence.
*
* TODO: Remove together with dma_resv rework.
*/
dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
dma_fence_get(p->fence), 1);
dma_resv_for_each_fence(&cursor, resv, false, fence) {
break;
}
dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
rcu_assign_pointer(resv->fence_excl, &chain->base);
e->chain = NULL;
}

View file

@ -26,23 +26,12 @@
#include "amdgpu.h"
struct amdgpu_gtt_node {
struct ttm_buffer_object *tbo;
struct ttm_range_mgr_node base;
};
static inline struct amdgpu_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_gtt_mgr, manager);
}
static inline struct amdgpu_gtt_node *
to_amdgpu_gtt_node(struct ttm_resource *res)
{
return container_of(res, struct amdgpu_gtt_node, base.base);
}
/**
* DOC: mem_info_gtt_total
*
@ -106,9 +95,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
*/
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
return drm_mm_node_allocated(&node->base.mm_nodes[0]);
return drm_mm_node_allocated(&node->mm_nodes[0]);
}
/**
@ -128,15 +117,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
uint32_t num_pages = PFN_UP(tbo->base.size);
struct amdgpu_gtt_node *node;
struct ttm_range_mgr_node *node;
int r;
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->tbo = tbo;
ttm_resource_init(tbo, place, &node->base.base);
ttm_resource_init(tbo, place, &node->base);
if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
ttm_resource_manager_usage(man) > man->size) {
r = -ENOSPC;
@ -145,8 +133,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
if (place->lpfn) {
spin_lock(&mgr->lock);
r = drm_mm_insert_node_in_range(&mgr->mm,
&node->base.mm_nodes[0],
r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
num_pages, tbo->page_alignment,
0, place->fpfn, place->lpfn,
DRM_MM_INSERT_BEST);
@ -154,18 +141,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
if (unlikely(r))
goto err_free;
node->base.base.start = node->base.mm_nodes[0].start;
node->base.start = node->mm_nodes[0].start;
} else {
node->base.mm_nodes[0].start = 0;
node->base.mm_nodes[0].size = node->base.base.num_pages;
node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
node->mm_nodes[0].start = 0;
node->mm_nodes[0].size = node->base.num_pages;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
*res = &node->base.base;
*res = &node->base;
return 0;
err_free:
ttm_resource_fini(man, &node->base.base);
ttm_resource_fini(man, &node->base);
kfree(node);
return r;
}
@ -181,12 +168,12 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
spin_lock(&mgr->lock);
if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
drm_mm_remove_node(&node->base.mm_nodes[0]);
if (drm_mm_node_allocated(&node->mm_nodes[0]))
drm_mm_remove_node(&node->mm_nodes[0]);
spin_unlock(&mgr->lock);
ttm_resource_fini(man, res);
@ -202,15 +189,15 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
*/
void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
{
struct amdgpu_gtt_node *node;
struct ttm_range_mgr_node *node;
struct drm_mm_node *mm_node;
struct amdgpu_device *adev;
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
amdgpu_ttm_recover_gart(node->tbo);
node = container_of(mm_node, typeof(*node), mm_nodes[0]);
amdgpu_ttm_recover_gart(node->base.bo);
}
spin_unlock(&mgr->lock);


@ -107,36 +107,19 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
u32 pasid)
{
struct dma_fence *fence, **fences;
struct amdgpu_pasid_cb *cb;
unsigned count;
struct dma_fence *fence;
int r;
r = dma_resv_get_fences(resv, true, &count, &fences);
r = dma_resv_get_singleton(resv, true, &fence);
if (r)
goto fallback;
if (count == 0) {
if (!fence) {
amdgpu_pasid_free(pasid);
return;
}
if (count == 1) {
fence = fences[0];
kfree(fences);
} else {
uint64_t context = dma_fence_context_alloc(1);
struct dma_fence_array *array;
array = dma_fence_array_create(count, fences, context,
1, false);
if (!array) {
kfree(fences);
goto fallback;
}
fence = &array->base;
}
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb) {
/* Last resort when we are OOM */


@ -1390,6 +1390,14 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct dma_resv *resv = bo->tbo.base.resv;
int r;
r = dma_resv_reserve_fences(resv, 1);
if (r) {
/* As last resort on OOM we block for the fence */
dma_fence_wait(fence, false);
return;
}
if (shared)
dma_resv_add_shared_fence(resv, fence);


@ -1547,7 +1547,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = {
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
.access_memory = &amdgpu_ttm_access_memory,
.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
};
/*


@ -377,7 +377,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
dma_resv_assert_held(vm->root.bo->tbo.base.resv);
vm->bulk_moveable = false;
ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
amdgpu_vm_bo_relocated(base);
else
@ -639,36 +639,6 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
list_add(&entry->tv.head, validated);
}
/**
* amdgpu_vm_del_from_lru_notify - update bulk_moveable flag
*
* @bo: BO which was removed from the LRU
*
* Make sure the bulk_moveable flag is updated when a BO is removed from the
* LRU.
*/
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_bo *abo;
struct amdgpu_vm_bo_base *bo_base;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
if (bo->pin_count)
return;
abo = ttm_to_amdgpu_bo(bo);
if (!abo->parent)
return;
for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv)
vm->bulk_moveable = false;
}
}
/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
@ -681,35 +651,9 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
spin_lock(&adev->mman.bdev.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&adev->mman.bdev.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
spin_lock(&adev->mman.bdev.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
if (!bo->parent)
continue;
ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
&vm->lru_bulk_move);
if (shadow)
ttm_bo_move_to_lru_tail(&shadow->tbo,
shadow->tbo.resource,
&vm->lru_bulk_move);
}
ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
spin_unlock(&adev->mman.bdev.lru_lock);
vm->bulk_moveable = true;
}
/**
@ -732,8 +676,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r;
vm->bulk_moveable &= list_empty(&vm->evicted);
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
@ -1057,10 +999,16 @@ static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
shadow = amdgpu_bo_shadowed(entry->bo);
if (shadow) {
ttm_bo_set_bulk_move(&shadow->tbo, NULL);
amdgpu_bo_unref(&shadow);
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
entry->bo->vm_bo = NULL;
list_del(&entry->vm_status);
amdgpu_bo_unref(&shadow);
amdgpu_bo_unref(&entry->bo);
}
@ -1080,8 +1028,6 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;
vm->bulk_moveable = false;
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
amdgpu_vm_free_table(entry);
@ -2665,7 +2611,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (bo) {
dma_resv_assert_held(bo->tbo.base.resv);
if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
vm->bulk_moveable = false;
ttm_bo_set_bulk_move(&bo->tbo, NULL);
for (base = &bo_va->base.bo->vm_bo; *base;
base = &(*base)->next) {
@ -2980,7 +2926,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r)
goto error_free_root;
r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
if (r)
goto error_unreserve;
@ -3423,7 +3369,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
value = 0;
}
r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve fence slot\n", r);
goto error_unlock;

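The amdgpu_vm changes above are the driver side of the reworked TTM bulk-move
handling from the Core Changes list: a BO is attached to a bulk-move set once,
and TTM then moves the whole set on the LRU instead of the driver walking its
own BO lists. Condensed from the hunks above:

  /* attach the BO to the per-VM bulk move set (pass NULL to detach) */
  ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);

  /* later, bump every BO in the set to the LRU tail in one operation */
  spin_lock(&adev->mman.bdev.lru_lock);
  ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
  spin_unlock(&adev->mman.bdev.lru_lock);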

@ -317,8 +317,6 @@ struct amdgpu_vm {
/* Store positions of group of BOs */
struct ttm_lru_bulk_move lru_bulk_move;
/* mark whether can do the bulk move */
bool bulk_moveable;
/* Flag to indicate if VM is used for compute */
bool is_compute_context;
};
@ -454,7 +452,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);


@ -548,7 +548,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
goto reserve_bo_failed;
}
r = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
amdgpu_bo_unreserve(bo);


@ -135,7 +135,6 @@ static void komeda_plane_destroy(struct drm_plane *plane)
static void komeda_plane_reset(struct drm_plane *plane)
{
struct komeda_plane_state *state;
struct komeda_plane *kplane = to_kplane(plane);
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
@ -144,16 +143,8 @@ static void komeda_plane_reset(struct drm_plane *plane)
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
state->base.rotation = DRM_MODE_ROTATE_0;
state->base.pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
state->base.alpha = DRM_BLEND_ALPHA_OPAQUE;
state->base.zpos = kplane->layer->base.id;
state->base.color_encoding = DRM_COLOR_YCBCR_BT601;
state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
plane->state = &state->base;
plane->state->plane = plane;
}
if (state)
__drm_atomic_helper_plane_reset(plane, &state->base);
}
static struct drm_plane_state *


@ -77,6 +77,7 @@ config DRM_DISPLAY_CONNECTOR
config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
depends on OF
select DRM_DP_HELPER
select DRM_KMS_HELPER
select EXTCON
help
@ -265,6 +266,7 @@ config DRM_TOSHIBA_TC358767
select DRM_DP_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
select DRM_PANEL
help
Toshiba TC358767 eDP bridge chip driver.


@ -1313,6 +1313,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511_audio_exit(adv7511);
drm_bridge_remove(&adv7511->bridge);
err_unregister_cec:
cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:


@ -1119,9 +1119,7 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
return 0;
}
pm_runtime_get_sync(dp->dev);
edid = drm_get_edid(connector, &dp->aux.ddc);
pm_runtime_put(dp->dev);
if (edid) {
drm_connector_update_edid_property(&dp->connector,
edid);
@ -1632,8 +1630,20 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct analogix_dp_device *dp = to_dp(aux);
int ret;
return analogix_dp_transfer(dp, msg);
pm_runtime_get_sync(dp->dev);
ret = analogix_dp_detect_hpd(dp);
if (ret)
goto out;
ret = analogix_dp_transfer(dp, msg);
out:
pm_runtime_mark_last_busy(dp->dev);
pm_runtime_put_autosuspend(dp->dev);
return ret;
}
struct analogix_dp_device *
@ -1764,6 +1774,8 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
if (ret)
return ret;
pm_runtime_use_autosuspend(dp->dev);
pm_runtime_set_autosuspend_delay(dp->dev, 100);
pm_runtime_enable(dp->dev);
ret = analogix_dp_create_bridge(drm_dev, dp);
@ -1775,6 +1787,7 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
return 0;
err_disable_pm_runtime:
pm_runtime_dont_use_autosuspend(dp->dev);
pm_runtime_disable(dp->dev);
drm_dp_aux_unregister(&dp->aux);
@ -1793,6 +1806,7 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
}
drm_dp_aux_unregister(&dp->aux);
pm_runtime_dont_use_autosuspend(dp->dev);
pm_runtime_disable(dp->dev);
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);

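The analogix_dp change above is a standard runtime-PM autosuspend conversion;
condensed, with the 100 ms delay chosen by this patch:

  /* once, in analogix_dp_bind() */
  pm_runtime_use_autosuspend(dp->dev);
  pm_runtime_set_autosuspend_delay(dp->dev, 100);
  pm_runtime_enable(dp->dev);

  /* around each hardware access, e.g. an AUX transfer */
  pm_runtime_get_sync(dp->dev);
  /* ... talk to the hardware ... */
  pm_runtime_mark_last_busy(dp->dev);
  pm_runtime_put_autosuspend(dp->dev);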

@ -874,7 +874,10 @@ static int anx7625_hdcp_enable(struct anx7625_data *ctx)
}
/* Read downstream capability */
anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, 0x68028, 1, &bcap);
if (ret < 0)
return ret;
if (!(bcap & 0x01)) {
pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
return 0;
@ -921,12 +924,20 @@ static void anx7625_dp_start(struct anx7625_data *ctx)
{
int ret;
struct device *dev = &ctx->client->dev;
u8 data;
if (!ctx->display_timing_valid) {
DRM_DEV_ERROR(dev, "mipi not set display timing yet.\n");
return;
}
dev_dbg(dev, "set downstream sink into normal\n");
/* Downstream sink enter into normal mode */
data = 1;
ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, 0x000600, 1, &data);
if (ret < 0)
dev_err(dev, "IO error : set sink into normal mode fail\n");
/* Disable HDCP */
anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
@ -1608,8 +1619,6 @@ static int anx7625_parse_dt(struct device *dev,
struct anx7625_platform_data *pdata)
{
struct device_node *np = dev->of_node, *ep0;
struct drm_panel *panel;
int ret;
int bus_type, mipi_lanes;
anx7625_get_swing_setting(dev, pdata);
@ -1646,18 +1655,14 @@ static int anx7625_parse_dt(struct device *dev,
if (of_property_read_bool(np, "analogix,audio-enable"))
pdata->audio_en = 1;
ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
if (ret < 0) {
if (ret == -ENODEV)
pdata->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
if (IS_ERR(pdata->panel_bridge)) {
if (PTR_ERR(pdata->panel_bridge) == -ENODEV)
return 0;
return ret;
}
if (!panel)
return -ENODEV;
pdata->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(pdata->panel_bridge))
return PTR_ERR(pdata->panel_bridge);
}
DRM_DEV_DEBUG_DRIVER(dev, "get panel node.\n");
return 0;
@ -2011,7 +2016,8 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_VIDEO_HSE;
MIPI_DSI_MODE_VIDEO_HSE |
MIPI_DSI_HS_PKT_END_ALIGNED;
ret = devm_mipi_dsi_attach(dev, dsi);
if (ret) {
@ -2654,7 +2660,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
if (ret) {
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
return ret;
goto free_wq;
}
if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
@ -2669,7 +2675,7 @@ static int anx7625_i2c_probe(struct i2c_client *client,
pm_suspend_ignore_children(dev, true);
ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev);
if (ret)
return ret;
goto free_wq;
if (!platform->pdata.low_power_mode) {
anx7625_disable_pd_protocol(platform);

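The parse_dt change above is one of the devm_drm_of_get_bridge() conversions
mentioned in the merge message: the helper looks up the given port/endpoint in
the OF graph and returns the remote bridge, wrapping a bare panel in a
panel_bridge automatically. A minimal sketch with the same arguments as the
hunk above:

  struct drm_bridge *next;

  next = devm_drm_of_get_bridge(dev, np, 1, 0);
  if (IS_ERR(next))
          return PTR_ERR(next);   /* -ENODEV if nothing is described in DT */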

@ -11,12 +11,24 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#define VENDOR_ID 0x00
#define DEVICE_ID_H 0x01
#define DEVICE_ID_L 0x02
#define VERSION_ID 0x03
#define FIRMWARE_VERSION 0x08
#define CONFIG_FINISH 0x09
#define PD_CTRL(n) (0x0a + ((n) & 0x3)) /* 0..3 */
#define RST_CTRL(n) (0x0e + ((n) & 0x1)) /* 0..1 */
#define SYS_CTRL(n) (0x10 + ((n) & 0x7)) /* 0..4 */
#define RGB_DRV(n) (0x18 + ((n) & 0x3)) /* 0..3 */
#define RGB_DLY(n) (0x1c + ((n) & 0x1)) /* 0..1 */
#define RGB_TEST_CTRL 0x1e
#define ATE_PLL_EN 0x1f
#define HACTIVE_LI 0x20
#define VACTIVE_LI 0x21
#define VACTIVE_HACTIVE_HI 0x22
@ -24,19 +36,114 @@
#define HSYNC_LI 0x24
#define HBP_LI 0x25
#define HFP_HSW_HBP_HI 0x26
#define HFP_HSW_HBP_HI_HFP(n) (((n) & 0x300) >> 4)
#define HFP_HSW_HBP_HI_HS(n) (((n) & 0x300) >> 6)
#define HFP_HSW_HBP_HI_HBP(n) (((n) & 0x300) >> 8)
#define VFP 0x27
#define VSYNC 0x28
#define VBP 0x29
#define BIST_POL 0x2a
#define BIST_POL_BIST_MODE(n) (((n) & 0xf) << 4)
#define BIST_POL_BIST_GEN BIT(3)
#define BIST_POL_HSYNC_POL BIT(2)
#define BIST_POL_VSYNC_POL BIT(1)
#define BIST_POL_DE_POL BIT(0)
#define BIST_RED 0x2b
#define BIST_GREEN 0x2c
#define BIST_BLUE 0x2d
#define BIST_CHESS_X 0x2e
#define BIST_CHESS_Y 0x2f
#define BIST_CHESS_XY_H 0x30
#define BIST_FRAME_TIME_L 0x31
#define BIST_FRAME_TIME_H 0x32
#define FIFO_MAX_ADDR_LOW 0x33
#define SYNC_EVENT_DLY 0x34
#define HSW_MIN 0x35
#define HFP_MIN 0x36
#define LOGIC_RST_NUM 0x37
#define OSC_CTRL(n) (0x48 + ((n) & 0x7)) /* 0..5 */
#define BG_CTRL 0x4e
#define LDO_PLL 0x4f
#define PLL_CTRL(n) (0x50 + ((n) & 0xf)) /* 0..15 */
#define PLL_CTRL_6_EXTERNAL 0x90
#define PLL_CTRL_6_MIPI_CLK 0x92
#define PLL_CTRL_6_INTERNAL 0x93
#define PLL_REM(n) (0x60 + ((n) & 0x3)) /* 0..2 */
#define PLL_DIV(n) (0x63 + ((n) & 0x3)) /* 0..2 */
#define PLL_FRAC(n) (0x66 + ((n) & 0x3)) /* 0..2 */
#define PLL_INT(n) (0x69 + ((n) & 0x1)) /* 0..1 */
#define PLL_REF_DIV 0x6b
#define PLL_REF_DIV_P(n) ((n) & 0xf)
#define PLL_REF_DIV_Pe BIT(4)
#define PLL_REF_DIV_S(n) (((n) & 0x7) << 5)
#define PLL_SSC_P(n) (0x6c + ((n) & 0x3)) /* 0..2 */
#define PLL_SSC_STEP(n) (0x6f + ((n) & 0x3)) /* 0..2 */
#define PLL_SSC_OFFSET(n) (0x72 + ((n) & 0x3)) /* 0..3 */
#define GPIO_OEN 0x79
#define MIPI_CFG_PW 0x7a
#define MIPI_CFG_PW_CONFIG_DSI 0xc1
#define MIPI_CFG_PW_CONFIG_I2C 0x3e
#define GPIO_SEL(n) (0x7b + ((n) & 0x1)) /* 0..1 */
#define IRQ_SEL 0x7d
#define DBG_SEL 0x7e
#define DBG_SIGNAL 0x7f
#define MIPI_ERR_VECTOR_L 0x80
#define MIPI_ERR_VECTOR_H 0x81
#define MIPI_ERR_VECTOR_EN_L 0x82
#define MIPI_ERR_VECTOR_EN_H 0x83
#define MIPI_MAX_SIZE_L 0x84
#define MIPI_MAX_SIZE_H 0x85
#define DSI_CTRL 0x86
#define DSI_CTRL_UNKNOWN 0x28
#define DSI_CTRL_DSI_LANES(n) ((n) & 0x3)
#define MIPI_PN_SWAP 0x87
#define MIPI_PN_SWAP_CLK BIT(4)
#define MIPI_PN_SWAP_D(n) BIT((n) & 0x3)
#define MIPI_SOT_SYNC_BIT_(n) (0x88 + ((n) & 0x1)) /* 0..1 */
#define MIPI_ULPS_CTRL 0x8a
#define MIPI_CLK_CHK_VAR 0x8e
#define MIPI_CLK_CHK_INI 0x8f
#define MIPI_T_TERM_EN 0x90
#define MIPI_T_HS_SETTLE 0x91
#define MIPI_T_TA_SURE_PRE 0x92
#define MIPI_T_LPX_SET 0x94
#define MIPI_T_CLK_MISS 0x95
#define MIPI_INIT_TIME_L 0x96
#define MIPI_INIT_TIME_H 0x97
#define MIPI_T_CLK_TERM_EN 0x99
#define MIPI_T_CLK_SETTLE 0x9a
#define MIPI_TO_HS_RX_L 0x9e
#define MIPI_TO_HS_RX_H 0x9f
#define MIPI_PHY_(n) (0xa0 + ((n) & 0x7)) /* 0..5 */
#define MIPI_PD_RX 0xb0
#define MIPI_PD_TERM 0xb1
#define MIPI_PD_HSRX 0xb2
#define MIPI_PD_LPTX 0xb3
#define MIPI_PD_LPRX 0xb4
#define MIPI_PD_CK_LANE 0xb5
#define MIPI_FORCE_0 0xb6
#define MIPI_RST_CTRL 0xb7
#define MIPI_RST_NUM 0xb8
#define MIPI_DBG_SET_(n) (0xc0 + ((n) & 0xf)) /* 0..9 */
#define MIPI_DBG_SEL 0xe0
#define MIPI_DBG_DATA 0xe1
#define MIPI_ATE_TEST_SEL 0xe2
#define MIPI_ATE_STATUS_(n) (0xe3 + ((n) & 0x1)) /* 0..1 */
#define MIPI_ATE_STATUS_1 0xe4
#define ICN6211_MAX_REGISTER MIPI_ATE_STATUS(1)
struct chipone {
struct device *dev;
struct i2c_client *client;
struct drm_bridge bridge;
struct drm_display_mode mode;
struct drm_bridge *panel_bridge;
struct mipi_dsi_device *dsi;
struct gpio_desc *enable_gpio;
struct regulator *vdd1;
struct regulator *vdd2;
struct regulator *vdd3;
bool interface_i2c;
};
static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
@ -44,70 +151,199 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
return container_of(bridge, struct chipone, bridge);
}
static inline int chipone_dsi_write(struct chipone *icn, const void *seq,
size_t len)
static void chipone_readb(struct chipone *icn, u8 reg, u8 *val)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(icn->dev);
return mipi_dsi_generic_write(dsi, seq, len);
if (icn->interface_i2c)
*val = i2c_smbus_read_byte_data(icn->client, reg);
else
mipi_dsi_generic_read(icn->dsi, (u8[]){reg, 1}, 2, val, 1);
}
#define ICN6211_DSI(icn, seq...) \
{ \
const u8 d[] = { seq }; \
chipone_dsi_write(icn, d, ARRAY_SIZE(d)); \
static int chipone_writeb(struct chipone *icn, u8 reg, u8 val)
{
if (icn->interface_i2c)
return i2c_smbus_write_byte_data(icn->client, reg, val);
else
return mipi_dsi_generic_write(icn->dsi, (u8[]){reg, val}, 2);
}
static void chipone_configure_pll(struct chipone *icn,
const struct drm_display_mode *mode)
{
unsigned int best_p = 0, best_m = 0, best_s = 0;
unsigned int mode_clock = mode->clock * 1000;
unsigned int delta, min_delta = 0xffffffff;
unsigned int freq_p, freq_s, freq_out;
unsigned int p_min, p_max;
unsigned int p, m, s;
unsigned int fin;
bool best_p_pot;
u8 ref_div;
/*
* DSI byte clock frequency (input into PLL) is calculated as:
* DSI_CLK = mode clock * bpp / dsi_data_lanes / 8
*
* DPI pixel clock frequency (output from PLL) is mode clock.
*
* The chip contains fractional PLL which works as follows:
* DPI_CLK = ((DSI_CLK / P) * M) / S
* P is pre-divider, register PLL_REF_DIV[3:0] is 1:n divider
* register PLL_REF_DIV[4] is extra 1:2 divider
* M is integer multiplier, register PLL_INT(0) is multiplier
* S is post-divider, register PLL_REF_DIV[7:5] is 2^(n+1) divider
*
* It seems the PLL input clock after applying P pre-divider have
* to be lower than 20 MHz.
*/
fin = mode_clock * mipi_dsi_pixel_format_to_bpp(icn->dsi->format) /
icn->dsi->lanes / 8; /* in Hz */
/* Minimum value of P predivider for PLL input in 5..20 MHz */
p_min = clamp(DIV_ROUND_UP(fin, 20000000), 1U, 31U);
p_max = clamp(fin / 5000000, 1U, 31U);
for (p = p_min; p < p_max; p++) { /* PLL_REF_DIV[4,3:0] */
if (p > 16 && p & 1) /* P > 16 uses extra /2 */
continue;
freq_p = fin / p;
if (freq_p == 0) /* Divider too high */
break;
for (s = 0; s < 0x7; s++) { /* PLL_REF_DIV[7:5] */
freq_s = freq_p / BIT(s + 1);
if (freq_s == 0) /* Divider too high */
break;
m = mode_clock / freq_s;
/* Multiplier is 8 bit */
if (m > 0xff)
continue;
/* Limit PLL VCO frequency to 1 GHz */
freq_out = (fin * m) / p;
if (freq_out > 1000000000)
continue;
/* Apply post-divider */
freq_out /= BIT(s + 1);
delta = abs(mode_clock - freq_out);
if (delta < min_delta) {
best_p = p;
best_m = m;
best_s = s;
min_delta = delta;
}
}
}
best_p_pot = !(best_p & 1);
dev_dbg(icn->dev,
"PLL: P[3:0]=%d P[4]=2*%d M=%d S[7:5]=2^%d delta=%d => DSI f_in=%d Hz ; DPI f_out=%d Hz\n",
best_p >> best_p_pot, best_p_pot, best_m, best_s + 1,
min_delta, fin, (fin * best_m) / (best_p << (best_s + 1)));
ref_div = PLL_REF_DIV_P(best_p >> best_p_pot) | PLL_REF_DIV_S(best_s);
if (best_p_pot) /* Prefer /2 pre-divider */
ref_div |= PLL_REF_DIV_Pe;
/* Clock source selection fixed to MIPI DSI clock lane */
chipone_writeb(icn, PLL_CTRL(6), PLL_CTRL_6_MIPI_CLK);
chipone_writeb(icn, PLL_REF_DIV, ref_div);
chipone_writeb(icn, PLL_INT(0), best_m);
}
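/*
 * Worked example of the search above (illustrative numbers, not taken from the
 * patch): a 70 MHz DPI mode clock with RGB888 over 4 DSI lanes gives
 *
 *   DSI_CLK = 70 MHz * 24 / 4 / 8 = 52.5 MHz
 *
 * P = 3 puts the PLL input at 17.5 MHz (inside the 5..20 MHz window),
 * S = 0 selects the 2^(0+1) = 2 post-divider, and M = 70 MHz / 8.75 MHz = 8,
 * so DPI_CLK = ((52.5 MHz / 3) * 8) / 2 = 70 MHz with zero delta; the VCO runs
 * at 52.5 MHz * 8 / 3 = 140 MHz, well below the 1 GHz limit.
 */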
static void chipone_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct chipone *icn = bridge_to_chipone(bridge);
struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_display_mode *mode = &icn->mode;
const struct drm_bridge_state *bridge_state;
u16 hfp, hbp, hsync;
u32 bus_flags;
u8 pol, id[4];
ICN6211_DSI(icn, 0x7a, 0xc1);
chipone_readb(icn, VENDOR_ID, id);
chipone_readb(icn, DEVICE_ID_H, id + 1);
chipone_readb(icn, DEVICE_ID_L, id + 2);
chipone_readb(icn, VERSION_ID, id + 3);
ICN6211_DSI(icn, HACTIVE_LI, mode->hdisplay & 0xff);
dev_dbg(icn->dev,
"Chip IDs: Vendor=0x%02x Device=0x%02x:0x%02x Version=0x%02x\n",
id[0], id[1], id[2], id[3]);
ICN6211_DSI(icn, VACTIVE_LI, mode->vdisplay & 0xff);
if (id[0] != 0xc1 || id[1] != 0x62 || id[2] != 0x11) {
dev_dbg(icn->dev, "Invalid Chip IDs, aborting configuration\n");
return;
}
/**
/* Get the DPI flags from the bridge state. */
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
bus_flags = bridge_state->output_bus_cfg.flags;
if (icn->interface_i2c)
chipone_writeb(icn, MIPI_CFG_PW, MIPI_CFG_PW_CONFIG_I2C);
else
chipone_writeb(icn, MIPI_CFG_PW, MIPI_CFG_PW_CONFIG_DSI);
chipone_writeb(icn, HACTIVE_LI, mode->hdisplay & 0xff);
chipone_writeb(icn, VACTIVE_LI, mode->vdisplay & 0xff);
/*
* lsb nibble: 2nd nibble of hdisplay
* msb nibble: 2nd nibble of vdisplay
*/
ICN6211_DSI(icn, VACTIVE_HACTIVE_HI,
((mode->hdisplay >> 8) & 0xf) |
(((mode->vdisplay >> 8) & 0xf) << 4));
chipone_writeb(icn, VACTIVE_HACTIVE_HI,
((mode->hdisplay >> 8) & 0xf) |
(((mode->vdisplay >> 8) & 0xf) << 4));
ICN6211_DSI(icn, HFP_LI, mode->hsync_start - mode->hdisplay);
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
ICN6211_DSI(icn, HSYNC_LI, mode->hsync_end - mode->hsync_start);
chipone_writeb(icn, HFP_LI, hfp & 0xff);
chipone_writeb(icn, HSYNC_LI, hsync & 0xff);
chipone_writeb(icn, HBP_LI, hbp & 0xff);
/* Top two bits of Horizontal Front porch/Sync/Back porch */
chipone_writeb(icn, HFP_HSW_HBP_HI,
HFP_HSW_HBP_HI_HFP(hfp) |
HFP_HSW_HBP_HI_HS(hsync) |
HFP_HSW_HBP_HI_HBP(hbp));
ICN6211_DSI(icn, HBP_LI, mode->htotal - mode->hsync_end);
chipone_writeb(icn, VFP, mode->vsync_start - mode->vdisplay);
ICN6211_DSI(icn, HFP_HSW_HBP_HI, 0x00);
chipone_writeb(icn, VSYNC, mode->vsync_end - mode->vsync_start);
ICN6211_DSI(icn, VFP, mode->vsync_start - mode->vdisplay);
ICN6211_DSI(icn, VSYNC, mode->vsync_end - mode->vsync_start);
ICN6211_DSI(icn, VBP, mode->vtotal - mode->vsync_end);
chipone_writeb(icn, VBP, mode->vtotal - mode->vsync_end);
/* dsi specific sequence */
ICN6211_DSI(icn, MIPI_DCS_SET_TEAR_OFF, 0x80);
ICN6211_DSI(icn, MIPI_DCS_SET_ADDRESS_MODE, 0x28);
ICN6211_DSI(icn, 0xb5, 0xa0);
ICN6211_DSI(icn, 0x5c, 0xff);
ICN6211_DSI(icn, MIPI_DCS_SET_COLUMN_ADDRESS, 0x01);
ICN6211_DSI(icn, MIPI_DCS_GET_POWER_SAVE, 0x92);
ICN6211_DSI(icn, 0x6b, 0x71);
ICN6211_DSI(icn, 0x69, 0x2b);
ICN6211_DSI(icn, MIPI_DCS_ENTER_SLEEP_MODE, 0x40);
ICN6211_DSI(icn, MIPI_DCS_EXIT_SLEEP_MODE, 0x98);
chipone_writeb(icn, SYNC_EVENT_DLY, 0x80);
chipone_writeb(icn, HFP_MIN, hfp & 0xff);
chipone_writeb(icn, MIPI_PD_CK_LANE, 0xa0);
chipone_writeb(icn, PLL_CTRL(12), 0xff);
chipone_writeb(icn, MIPI_PN_SWAP, 0x00);
/* DPI HS/VS/DE polarity */
pol = ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIST_POL_HSYNC_POL : 0) |
((mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIST_POL_VSYNC_POL : 0) |
((bus_flags & DRM_BUS_FLAG_DE_HIGH) ? BIST_POL_DE_POL : 0);
chipone_writeb(icn, BIST_POL, pol);
/* Configure PLL settings */
chipone_configure_pll(icn, mode);
chipone_writeb(icn, SYS_CTRL(0), 0x40);
chipone_writeb(icn, SYS_CTRL(1), 0x88);
/* icn6211 specific sequence */
ICN6211_DSI(icn, 0xb6, 0x20);
ICN6211_DSI(icn, 0x51, 0x20);
ICN6211_DSI(icn, 0x09, 0x10);
chipone_writeb(icn, MIPI_FORCE_0, 0x20);
chipone_writeb(icn, PLL_CTRL(1), 0x20);
chipone_writeb(icn, CONFIG_FINISH, 0x10);
usleep_range(10000, 11000);
}
@ -168,6 +404,67 @@ static void chipone_mode_set(struct drm_bridge *bridge,
struct chipone *icn = bridge_to_chipone(bridge);
drm_mode_copy(&icn->mode, adjusted_mode);
};
static int chipone_dsi_attach(struct chipone *icn)
{
struct mipi_dsi_device *dsi = icn->dsi;
int ret;
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
ret = mipi_dsi_attach(dsi);
if (ret < 0)
dev_err(icn->dev, "failed to attach dsi\n");
return ret;
}
static int chipone_dsi_host_attach(struct chipone *icn)
{
struct device *dev = icn->dev;
struct device_node *host_node;
struct device_node *endpoint;
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
int ret = 0;
const struct mipi_dsi_device_info info = {
.type = "chipone",
.channel = 0,
.node = NULL,
};
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
host_node = of_graph_get_remote_port_parent(endpoint);
of_node_put(endpoint);
if (!host_node)
return -EINVAL;
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
if (!host) {
dev_err(dev, "failed to find dsi host\n");
return -EPROBE_DEFER;
}
dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi)) {
return dev_err_probe(dev, PTR_ERR(dsi),
"failed to create dsi device\n");
}
icn->dsi = dsi;
ret = chipone_dsi_attach(icn);
if (ret < 0)
mipi_dsi_device_unregister(dsi);
return ret;
}
static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
@ -177,6 +474,32 @@ static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flag
return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags);
}
#define MAX_INPUT_SEL_FORMATS 1
static u32 *
chipone_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
*num_input_fmts = 0;
input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
/* This is the DSI-end bus format */
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
*num_input_fmts = 1;
return input_fmts;
}
static const struct drm_bridge_funcs chipone_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@ -186,6 +509,7 @@ static const struct drm_bridge_funcs chipone_bridge_funcs = {
.atomic_post_disable = chipone_atomic_post_disable,
.mode_set = chipone_mode_set,
.attach = chipone_attach,
.atomic_get_input_bus_fmts = chipone_atomic_get_input_bus_fmts,
};
static int chipone_parse_dt(struct chipone *icn)
@ -233,9 +557,8 @@ static int chipone_parse_dt(struct chipone *icn)
return 0;
}
static int chipone_probe(struct mipi_dsi_device *dsi)
static int chipone_common_probe(struct device *dev, struct chipone **icnr)
{
struct device *dev = &dsi->dev;
struct chipone *icn;
int ret;
@ -243,7 +566,6 @@ static int chipone_probe(struct mipi_dsi_device *dsi)
if (!icn)
return -ENOMEM;
mipi_dsi_set_drvdata(dsi, icn);
icn->dev = dev;
ret = chipone_parse_dt(icn);
@ -254,22 +576,57 @@ static int chipone_probe(struct mipi_dsi_device *dsi)
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
drm_bridge_add(&icn->bridge);
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
drm_bridge_remove(&icn->bridge);
dev_err(dev, "failed to attach dsi\n");
}
*icnr = icn;
return ret;
}
static int chipone_remove(struct mipi_dsi_device *dsi)
static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
struct chipone *icn;
int ret;
ret = chipone_common_probe(dev, &icn);
if (ret)
return ret;
icn->interface_i2c = false;
icn->dsi = dsi;
mipi_dsi_set_drvdata(dsi, icn);
drm_bridge_add(&icn->bridge);
ret = chipone_dsi_attach(icn);
if (ret)
drm_bridge_remove(&icn->bridge);
return ret;
}
static int chipone_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct chipone *icn;
int ret;
ret = chipone_common_probe(dev, &icn);
if (ret)
return ret;
icn->interface_i2c = true;
icn->client = client;
dev_set_drvdata(dev, icn);
i2c_set_clientdata(client, icn);
drm_bridge_add(&icn->bridge);
return chipone_dsi_host_attach(icn);
}
static int chipone_dsi_remove(struct mipi_dsi_device *dsi)
{
struct chipone *icn = mipi_dsi_get_drvdata(dsi);
@ -285,16 +642,48 @@ static const struct of_device_id chipone_of_match[] = {
};
MODULE_DEVICE_TABLE(of, chipone_of_match);
static struct mipi_dsi_driver chipone_driver = {
.probe = chipone_probe,
.remove = chipone_remove,
static struct mipi_dsi_driver chipone_dsi_driver = {
.probe = chipone_dsi_probe,
.remove = chipone_dsi_remove,
.driver = {
.name = "chipone-icn6211",
.owner = THIS_MODULE,
.of_match_table = chipone_of_match,
},
};
module_mipi_dsi_driver(chipone_driver);
static struct i2c_device_id chipone_i2c_id[] = {
{ "chipone,icn6211" },
{},
};
MODULE_DEVICE_TABLE(i2c, chipone_i2c_id);
static struct i2c_driver chipone_i2c_driver = {
.probe = chipone_i2c_probe,
.id_table = chipone_i2c_id,
.driver = {
.name = "chipone-icn6211-i2c",
.of_match_table = chipone_of_match,
},
};
static int __init chipone_init(void)
{
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
mipi_dsi_driver_register(&chipone_dsi_driver);
return i2c_add_driver(&chipone_i2c_driver);
}
module_init(chipone_init);
static void __exit chipone_exit(void)
{
i2c_del_driver(&chipone_i2c_driver);
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
mipi_dsi_driver_unregister(&chipone_dsi_driver);
}
module_exit(chipone_exit);
MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
MODULE_DESCRIPTION("Chipone ICN6211 MIPI-DSI to RGB Converter Bridge");


@ -27,6 +27,8 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <sound/hdmi-codec.h>
#define IT66121_VENDOR_ID0_REG 0x00
#define IT66121_VENDOR_ID1_REG 0x01
#define IT66121_DEVICE_ID0_REG 0x02
@ -155,6 +157,9 @@
#define IT66121_AV_MUTE_ON BIT(0)
#define IT66121_AV_MUTE_BLUESCR BIT(1)
#define IT66121_PKT_CTS_CTRL_REG 0xC5
#define IT66121_PKT_CTS_CTRL_SEL BIT(1)
#define IT66121_PKT_GEN_CTRL_REG 0xC6
#define IT66121_PKT_GEN_CTRL_ON BIT(0)
#define IT66121_PKT_GEN_CTRL_RPT BIT(1)
@ -202,6 +207,89 @@
#define IT66121_EDID_SLEEP_US 20000
#define IT66121_EDID_TIMEOUT_US 200000
#define IT66121_EDID_FIFO_SIZE 32
#define IT66121_CLK_CTRL0_REG 0x58
#define IT66121_CLK_CTRL0_AUTO_OVER_SAMPLING BIT(4)
#define IT66121_CLK_CTRL0_EXT_MCLK_MASK GENMASK(3, 2)
#define IT66121_CLK_CTRL0_EXT_MCLK_128FS (0 << 2)
#define IT66121_CLK_CTRL0_EXT_MCLK_256FS BIT(2)
#define IT66121_CLK_CTRL0_EXT_MCLK_512FS (2 << 2)
#define IT66121_CLK_CTRL0_EXT_MCLK_1024FS (3 << 2)
#define IT66121_CLK_CTRL0_AUTO_IPCLK BIT(0)
#define IT66121_CLK_STATUS1_REG 0x5E
#define IT66121_CLK_STATUS2_REG 0x5F
#define IT66121_AUD_CTRL0_REG 0xE0
#define IT66121_AUD_SWL (3 << 6)
#define IT66121_AUD_16BIT (0 << 6)
#define IT66121_AUD_18BIT BIT(6)
#define IT66121_AUD_20BIT (2 << 6)
#define IT66121_AUD_24BIT (3 << 6)
#define IT66121_AUD_SPDIFTC BIT(5)
#define IT66121_AUD_SPDIF BIT(4)
#define IT66121_AUD_I2S (0 << 4)
#define IT66121_AUD_EN_I2S3 BIT(3)
#define IT66121_AUD_EN_I2S2 BIT(2)
#define IT66121_AUD_EN_I2S1 BIT(1)
#define IT66121_AUD_EN_I2S0 BIT(0)
#define IT66121_AUD_CTRL0_AUD_SEL BIT(4)
#define IT66121_AUD_CTRL1_REG 0xE1
#define IT66121_AUD_FIFOMAP_REG 0xE2
#define IT66121_AUD_CTRL3_REG 0xE3
#define IT66121_AUD_SRCVALID_FLAT_REG 0xE4
#define IT66121_AUD_FLAT_SRC0 BIT(4)
#define IT66121_AUD_FLAT_SRC1 BIT(5)
#define IT66121_AUD_FLAT_SRC2 BIT(6)
#define IT66121_AUD_FLAT_SRC3 BIT(7)
#define IT66121_AUD_HDAUDIO_REG 0xE5
#define IT66121_AUD_PKT_CTS0_REG 0x130
#define IT66121_AUD_PKT_CTS1_REG 0x131
#define IT66121_AUD_PKT_CTS2_REG 0x132
#define IT66121_AUD_PKT_N0_REG 0x133
#define IT66121_AUD_PKT_N1_REG 0x134
#define IT66121_AUD_PKT_N2_REG 0x135
#define IT66121_AUD_CHST_MODE_REG 0x191
#define IT66121_AUD_CHST_CAT_REG 0x192
#define IT66121_AUD_CHST_SRCNUM_REG 0x193
#define IT66121_AUD_CHST_CHTNUM_REG 0x194
#define IT66121_AUD_CHST_CA_FS_REG 0x198
#define IT66121_AUD_CHST_OFS_WL_REG 0x199
#define IT66121_AUD_PKT_CTS_CNT0_REG 0x1A0
#define IT66121_AUD_PKT_CTS_CNT1_REG 0x1A1
#define IT66121_AUD_PKT_CTS_CNT2_REG 0x1A2
#define IT66121_AUD_FS_22P05K 0x4
#define IT66121_AUD_FS_44P1K 0x0
#define IT66121_AUD_FS_88P2K 0x8
#define IT66121_AUD_FS_176P4K 0xC
#define IT66121_AUD_FS_24K 0x6
#define IT66121_AUD_FS_48K 0x2
#define IT66121_AUD_FS_96K 0xA
#define IT66121_AUD_FS_192K 0xE
#define IT66121_AUD_FS_768K 0x9
#define IT66121_AUD_FS_32K 0x3
#define IT66121_AUD_FS_OTHER 0x1
#define IT66121_AUD_SWL_21BIT 0xD
#define IT66121_AUD_SWL_24BIT 0xB
#define IT66121_AUD_SWL_23BIT 0x9
#define IT66121_AUD_SWL_22BIT 0x5
#define IT66121_AUD_SWL_20BIT 0x3
#define IT66121_AUD_SWL_17BIT 0xC
#define IT66121_AUD_SWL_19BIT 0x8
#define IT66121_AUD_SWL_18BIT 0x4
#define IT66121_AUD_SWL_16BIT 0x2
#define IT66121_AUD_SWL_NOT_INDICATED 0x0
#define IT66121_VENDOR_ID0 0x54
#define IT66121_VENDOR_ID1 0x49
#define IT66121_DEVICE_ID0 0x12
#define IT66121_DEVICE_ID1 0x06
#define IT66121_DEVICE_MASK 0x0F
#define IT66121_AFE_CLK_HIGH 80000 /* kHz */
struct it66121_ctx {
@ -216,6 +304,13 @@ struct it66121_ctx {
u32 bus_width;
struct mutex lock; /* Protects fields below and device registers */
struct hdmi_avi_infoframe hdmi_avi_infoframe;
struct {
struct platform_device *pdev;
u8 ch_enable;
u8 fs;
u8 swl;
bool auto_cts;
} audio;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@ -227,7 +322,7 @@ static const struct regmap_range_cfg it66121_regmap_banks[] = {
.selector_mask = 0x1,
.selector_shift = 0,
.window_start = 0x00,
.window_len = 0x130,
.window_len = 0x100,
},
};
@ -886,6 +981,536 @@ static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int it661221_set_chstat(struct it66121_ctx *ctx, u8 iec60958_chstat[])
{
int ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_MODE_REG, iec60958_chstat[0] & 0x7C);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_CAT_REG, iec60958_chstat[1]);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_SRCNUM_REG, iec60958_chstat[2] & 0x0F);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_CHTNUM_REG,
(iec60958_chstat[2] >> 4) & 0x0F);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_CA_FS_REG, iec60958_chstat[3]);
if (ret)
return ret;
return regmap_write(ctx->regmap, IT66121_AUD_CHST_OFS_WL_REG, iec60958_chstat[4]);
}
static int it661221_set_lpcm_audio(struct it66121_ctx *ctx, u8 audio_src_num, u8 audio_swl)
{
int ret;
unsigned int audio_enable = 0;
unsigned int audio_format = 0;
switch (audio_swl) {
case 16:
audio_enable |= IT66121_AUD_16BIT;
break;
case 18:
audio_enable |= IT66121_AUD_18BIT;
break;
case 20:
audio_enable |= IT66121_AUD_20BIT;
break;
case 24:
default:
audio_enable |= IT66121_AUD_24BIT;
break;
}
audio_format |= 0x40;
switch (audio_src_num) {
case 4:
audio_enable |= IT66121_AUD_EN_I2S3 | IT66121_AUD_EN_I2S2 |
IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0;
break;
case 3:
audio_enable |= IT66121_AUD_EN_I2S2 | IT66121_AUD_EN_I2S1 |
IT66121_AUD_EN_I2S0;
break;
case 2:
audio_enable |= IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0;
break;
case 1:
default:
audio_format &= ~0x40;
audio_enable |= IT66121_AUD_EN_I2S0;
break;
}
audio_format |= 0x01;
ctx->audio.ch_enable = audio_enable;
ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL0_REG, audio_enable & 0xF0);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL1_REG, audio_format);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_FIFOMAP_REG, 0xE4);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL3_REG, 0x00);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG, 0x00);
if (ret)
return ret;
return regmap_write(ctx->regmap, IT66121_AUD_HDAUDIO_REG, 0x00);
}
static int it661221_set_ncts(struct it66121_ctx *ctx, u8 fs)
{
int ret;
unsigned int n;
switch (fs) {
case IT66121_AUD_FS_32K:
n = 4096;
break;
case IT66121_AUD_FS_44P1K:
n = 6272;
break;
case IT66121_AUD_FS_48K:
n = 6144;
break;
case IT66121_AUD_FS_88P2K:
n = 12544;
break;
case IT66121_AUD_FS_96K:
n = 12288;
break;
case IT66121_AUD_FS_176P4K:
n = 25088;
break;
case IT66121_AUD_FS_192K:
n = 24576;
break;
case IT66121_AUD_FS_768K:
n = 24576;
break;
default:
n = 6144;
break;
}
ret = regmap_write(ctx->regmap, IT66121_AUD_PKT_N0_REG, (u8)((n) & 0xFF));
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_PKT_N1_REG, (u8)((n >> 8) & 0xFF));
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_PKT_N2_REG, (u8)((n >> 16) & 0xF));
if (ret)
return ret;
if (ctx->audio.auto_cts) {
u8 loop_cnt = 255;
u8 cts_stable_cnt = 0;
unsigned int sum_cts = 0;
unsigned int cts = 0;
unsigned int last_cts = 0;
unsigned int diff;
unsigned int val;
while (loop_cnt--) {
msleep(30);
regmap_read(ctx->regmap, IT66121_AUD_PKT_CTS_CNT2_REG, &val);
cts = val << 12;
regmap_read(ctx->regmap, IT66121_AUD_PKT_CTS_CNT1_REG, &val);
cts |= val << 4;
regmap_read(ctx->regmap, IT66121_AUD_PKT_CTS_CNT0_REG, &val);
cts |= val >> 4;
if (cts == 0) {
continue;
} else {
if (last_cts > cts)
diff = last_cts - cts;
else
diff = cts - last_cts;
last_cts = cts;
if (diff < 5) {
cts_stable_cnt++;
sum_cts += cts;
} else {
cts_stable_cnt = 0;
sum_cts = 0;
continue;
}
if (cts_stable_cnt >= 32) {
last_cts = (sum_cts >> 5);
break;
}
}
}
regmap_write(ctx->regmap, IT66121_AUD_PKT_CTS0_REG, (u8)((last_cts) & 0xFF));
regmap_write(ctx->regmap, IT66121_AUD_PKT_CTS1_REG, (u8)((last_cts >> 8) & 0xFF));
regmap_write(ctx->regmap, IT66121_AUD_PKT_CTS2_REG, (u8)((last_cts >> 16) & 0x0F));
}
ret = regmap_write(ctx->regmap, 0xF8, 0xC3);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, 0xF8, 0xA5);
if (ret)
return ret;
if (ctx->audio.auto_cts) {
ret = regmap_write_bits(ctx->regmap, IT66121_PKT_CTS_CTRL_REG,
IT66121_PKT_CTS_CTRL_SEL,
1);
} else {
ret = regmap_write_bits(ctx->regmap, IT66121_PKT_CTS_CTRL_REG,
IT66121_PKT_CTS_CTRL_SEL,
0);
}
if (ret)
return ret;
return regmap_write(ctx->regmap, 0xF8, 0xFF);
}
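
For the standard rates the N values programmed above follow the HDMI recommendation N = 128 * fs / 1000 (and N = 128 * fs / 900 for the 44.1 kHz family): 48 kHz gives 128 * 48000 / 1000 = 6144 and 44.1 kHz gives 128 * 44100 / 900 = 6272. The sink then regenerates the audio clock from the measured or programmed CTS as fs = f_TMDS * N / (128 * CTS), which is why the auto-CTS path above averages 32 stable hardware CTS readings before freezing the value into the CTS registers.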
static int it661221_audio_output_enable(struct it66121_ctx *ctx, bool enable)
{
int ret;
if (enable) {
ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG,
IT66121_SW_RST_AUD | IT66121_SW_RST_AREF,
0);
if (ret)
return ret;
ret = regmap_write_bits(ctx->regmap, IT66121_AUD_CTRL0_REG,
IT66121_AUD_EN_I2S3 | IT66121_AUD_EN_I2S2 |
IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0,
ctx->audio.ch_enable);
} else {
ret = regmap_write_bits(ctx->regmap, IT66121_AUD_CTRL0_REG,
IT66121_AUD_EN_I2S3 | IT66121_AUD_EN_I2S2 |
IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0,
ctx->audio.ch_enable & 0xF0);
if (ret)
return ret;
ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG,
IT66121_SW_RST_AUD | IT66121_SW_RST_AREF,
IT66121_SW_RST_AUD | IT66121_SW_RST_AREF);
}
return ret;
}
static int it661221_audio_ch_enable(struct it66121_ctx *ctx, bool enable)
{
int ret;
if (enable) {
ret = regmap_write(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG, 0);
if (ret)
return ret;
ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL0_REG, ctx->audio.ch_enable);
} else {
ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL0_REG, ctx->audio.ch_enable & 0xF0);
}
return ret;
}
static int it66121_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
{
u8 fs;
u8 swl;
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
static u8 iec60958_chstat[5];
unsigned int channels = params->channels;
unsigned int sample_rate = params->sample_rate;
unsigned int sample_width = params->sample_width;
mutex_lock(&ctx->lock);
dev_dbg(dev, "%s: %u, %u, %u, %u\n", __func__,
daifmt->fmt, sample_rate, sample_width, channels);
switch (daifmt->fmt) {
case HDMI_I2S:
dev_dbg(dev, "Using HDMI I2S\n");
break;
default:
dev_err(dev, "Invalid or unsupported DAI format %d\n", daifmt->fmt);
ret = -EINVAL;
goto out;
}
// Set audio clock recovery (N/CTS)
ret = regmap_write(ctx->regmap, IT66121_CLK_CTRL0_REG,
IT66121_CLK_CTRL0_AUTO_OVER_SAMPLING |
IT66121_CLK_CTRL0_EXT_MCLK_256FS |
IT66121_CLK_CTRL0_AUTO_IPCLK);
if (ret)
goto out;
ret = regmap_write_bits(ctx->regmap, IT66121_AUD_CTRL0_REG,
IT66121_AUD_CTRL0_AUD_SEL, 0); // remove spdif selection
if (ret)
goto out;
switch (sample_rate) {
case 44100L:
fs = IT66121_AUD_FS_44P1K;
break;
case 88200L:
fs = IT66121_AUD_FS_88P2K;
break;
case 176400L:
fs = IT66121_AUD_FS_176P4K;
break;
case 32000L:
fs = IT66121_AUD_FS_32K;
break;
case 48000L:
fs = IT66121_AUD_FS_48K;
break;
case 96000L:
fs = IT66121_AUD_FS_96K;
break;
case 192000L:
fs = IT66121_AUD_FS_192K;
break;
case 768000L:
fs = IT66121_AUD_FS_768K;
break;
default:
fs = IT66121_AUD_FS_48K;
break;
}
ctx->audio.fs = fs;
ret = it661221_set_ncts(ctx, fs);
if (ret) {
dev_err(dev, "Failed to set N/CTS: %d\n", ret);
goto out;
}
// Set audio format register (except audio channel enable)
ret = it661221_set_lpcm_audio(ctx, (channels + 1) / 2, sample_width);
if (ret) {
dev_err(dev, "Failed to set LPCM audio: %d\n", ret);
goto out;
}
// Set audio channel status
iec60958_chstat[0] = 0;
if ((channels + 1) / 2 == 1)
iec60958_chstat[0] |= 0x1;
iec60958_chstat[0] &= ~(1 << 1);
iec60958_chstat[1] = 0;
iec60958_chstat[2] = (channels + 1) / 2;
iec60958_chstat[2] |= (channels << 4) & 0xF0;
iec60958_chstat[3] = fs;
switch (sample_width) {
case 21L:
swl = IT66121_AUD_SWL_21BIT;
break;
case 24L:
swl = IT66121_AUD_SWL_24BIT;
break;
case 23L:
swl = IT66121_AUD_SWL_23BIT;
break;
case 22L:
swl = IT66121_AUD_SWL_22BIT;
break;
case 20L:
swl = IT66121_AUD_SWL_20BIT;
break;
case 17L:
swl = IT66121_AUD_SWL_17BIT;
break;
case 19L:
swl = IT66121_AUD_SWL_19BIT;
break;
case 18L:
swl = IT66121_AUD_SWL_18BIT;
break;
case 16L:
swl = IT66121_AUD_SWL_16BIT;
break;
default:
swl = IT66121_AUD_SWL_NOT_INDICATED;
break;
}
iec60958_chstat[4] = (((~fs) << 4) & 0xF0) | swl;
ret = it661221_set_chstat(ctx, iec60958_chstat);
if (ret) {
dev_err(dev, "Failed to set channel status: %d\n", ret);
goto out;
}
// Enable audio channel enable while input clock stable (if SPDIF).
ret = it661221_audio_ch_enable(ctx, true);
if (ret) {
dev_err(dev, "Failed to enable audio channel: %d\n", ret);
goto out;
}
ret = regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
IT66121_INT_MASK1_AUD_OVF,
0);
if (ret)
goto out;
dev_dbg(dev, "HDMI audio enabled.\n");
out:
mutex_unlock(&ctx->lock);
return ret;
}
static int it66121_audio_startup(struct device *dev, void *data)
{
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
dev_dbg(dev, "%s\n", __func__);
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, true);
if (ret)
dev_err(dev, "Failed to enable audio output: %d\n", ret);
mutex_unlock(&ctx->lock);
return ret;
}
static void it66121_audio_shutdown(struct device *dev, void *data)
{
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
dev_dbg(dev, "%s\n", __func__);
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, false);
if (ret)
dev_err(dev, "Failed to disable audio output: %d\n", ret);
mutex_unlock(&ctx->lock);
}
static int it66121_audio_mute(struct device *dev, void *data,
bool enable, int direction)
{
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
dev_dbg(dev, "%s: enable=%s, direction=%d\n",
__func__, enable ? "true" : "false", direction);
mutex_lock(&ctx->lock);
if (enable) {
ret = regmap_write_bits(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG,
IT66121_AUD_FLAT_SRC0 | IT66121_AUD_FLAT_SRC1 |
IT66121_AUD_FLAT_SRC2 | IT66121_AUD_FLAT_SRC3,
IT66121_AUD_FLAT_SRC0 | IT66121_AUD_FLAT_SRC1 |
IT66121_AUD_FLAT_SRC2 | IT66121_AUD_FLAT_SRC3);
} else {
ret = regmap_write_bits(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG,
IT66121_AUD_FLAT_SRC0 | IT66121_AUD_FLAT_SRC1 |
IT66121_AUD_FLAT_SRC2 | IT66121_AUD_FLAT_SRC3,
0);
}
mutex_unlock(&ctx->lock);
return ret;
}
static int it66121_audio_get_eld(struct device *dev, void *data,
u8 *buf, size_t len)
{
struct it66121_ctx *ctx = dev_get_drvdata(dev);
mutex_lock(&ctx->lock);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
mutex_unlock(&ctx->lock);
return 0;
}
static const struct hdmi_codec_ops it66121_audio_codec_ops = {
.hw_params = it66121_audio_hw_params,
.audio_startup = it66121_audio_startup,
.audio_shutdown = it66121_audio_shutdown,
.mute_stream = it66121_audio_mute,
.get_eld = it66121_audio_get_eld,
.no_capture_mute = 1,
};
static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
{
struct hdmi_codec_pdata codec_data = {
.ops = &it66121_audio_codec_ops,
.i2s = 1, /* Only i2s support for now */
.spdif = 0,
.max_i2s_channels = 8,
};
dev_dbg(dev, "%s\n", __func__);
if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
}
ctx->audio.pdev = platform_device_register_data(dev,
HDMI_CODEC_DRV_NAME,
PLATFORM_DEVID_AUTO,
&codec_data,
sizeof(codec_data));
if (IS_ERR(ctx->audio.pdev)) {
dev_err(dev, "Failed to initialize HDMI audio codec: %d\n",
PTR_ERR_OR_ZERO(ctx->audio.pdev));
}
return PTR_ERR_OR_ZERO(ctx->audio.pdev);
}
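
Registering a child platform device named HDMI_CODEC_DRV_NAME is the usual way HDMI bridges expose audio to ASoC: the hdmi-codec driver binds to it and turns the hdmi_codec_ops above into a DAI, while the sound card references the bridge node through #sound-dai-cells, which is why codec registration is skipped when that property is absent.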
static int it66121_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@ -988,6 +1613,8 @@ static int it66121_probe(struct i2c_client *client,
return ret;
}
it66121_audio_codec_init(ctx, dev);
drm_bridge_add(&ctx->bridge);
dev_info(ctx->dev, "IT66121 revision %d probed\n", revision_id);


@ -700,7 +700,9 @@ lt9611_connector_mode_valid(struct drm_connector *connector,
}
/* bridge funcs */
static void lt9611_bridge_enable(struct drm_bridge *bridge)
static void
lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@ -721,7 +723,9 @@ static void lt9611_bridge_enable(struct drm_bridge *bridge)
regmap_write(lt9611->regmap, 0x8130, 0xea);
}
static void lt9611_bridge_disable(struct drm_bridge *bridge)
static void
lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
int ret;
@ -856,7 +860,9 @@ static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
lt9611->sleep = false;
}
static void lt9611_bridge_post_disable(struct drm_bridge *bridge)
static void
lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
@ -916,16 +922,47 @@ static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge)
lt9611_enable_hpd_interrupts(lt9611);
}
#define MAX_INPUT_SEL_FORMATS 1
static u32 *
lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
*num_input_fmts = 0;
input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
/* This is the DSI-end bus format */
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
*num_input_fmts = 1;
return input_fmts;
}
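
The format-negotiation helper in the bridge core frees the returned array once a format has been picked, so allocating a fresh single-entry array with kcalloc() on every call, as above, is the expected pattern.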
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
.enable = lt9611_bridge_enable,
.disable = lt9611_bridge_disable,
.post_disable = lt9611_bridge_post_disable,
.mode_set = lt9611_bridge_mode_set,
.detect = lt9611_bridge_detect,
.get_edid = lt9611_bridge_get_edid,
.hpd_enable = lt9611_bridge_hpd_enable,
.atomic_enable = lt9611_bridge_atomic_enable,
.atomic_disable = lt9611_bridge_atomic_disable,
.atomic_post_disable = lt9611_bridge_atomic_post_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_get_input_bus_fmts = lt9611_atomic_get_input_bus_fmts,
};
static int lt9611_parse_dt(struct device *dev,


@ -26,7 +26,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <video/mipi_display.h>
@ -853,7 +852,7 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
/* Save the new desired phy config */
memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
drm_mode_copy(&dsi->mode, adjusted_mode);
drm_mode_debug_printmodeline(adjusted_mode);
if (pm_runtime_resume_and_get(dev) < 0)
@ -910,32 +909,14 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
struct drm_bridge *panel_bridge;
struct drm_panel *panel;
int ret;
ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)
return ret;
if (panel) {
panel_bridge = drm_panel_bridge_add(panel);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
}
if (!panel_bridge)
return -EPROBE_DEFER;
panel_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags);
}
static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
}
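
devm_drm_of_get_bridge(), used here and in the other conversions in this pull, combines the drm_of_find_panel_or_bridge() lookup with a device-managed panel bridge: it hands back the next bridge in the chain (wrapping a panel when needed) or an ERR_PTR, typically -EPROBE_DEFER while the downstream device has not probed yet. Because the panel bridge is devm-managed, the explicit drm_of_panel_bridge_remove() in the detach hook above can simply be deleted.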
static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@ -981,7 +962,6 @@ static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
.mode_set = nwl_dsi_bridge_mode_set,
.mode_valid = nwl_dsi_bridge_mode_valid,
.attach = nwl_dsi_bridge_attach,
.detach = nwl_dsi_bridge_detach,
};
static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
@ -1153,7 +1133,7 @@ MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
{ .soc_id = "i.MX8MQ", .revision = "2.0",
.data = (void *)E11418_HS_MODE_QUIRK },
{ /* sentinel. */ },
{ /* sentinel. */ }
};
static int nwl_dsi_probe(struct platform_device *pdev)


@ -263,7 +263,6 @@ static int ptn3460_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
struct drm_bridge *panel_bridge;
struct drm_panel *panel;
int ret;
ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
@ -271,11 +270,7 @@ static int ptn3460_probe(struct i2c_client *client,
return -ENOMEM;
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, NULL);
if (ret)
return ret;
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);


@ -83,6 +83,9 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
if (connector->funcs->reset)
connector->funcs->reset(connector);
return 0;
}


@ -452,18 +452,13 @@ static int ps8622_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct ps8622_bridge *ps8622;
struct drm_bridge *panel_bridge;
struct drm_panel *panel;
int ret;
ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
if (!ps8622)
return -ENOMEM;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &panel, NULL);
if (ret)
return ret;
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);


@ -589,7 +589,6 @@ static int ps8640_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
struct ps8640 *ps_bridge;
struct drm_panel *panel;
int ret;
u32 i;
@ -674,13 +673,7 @@ static int ps8640_probe(struct i2c_client *client)
devm_of_dp_aux_populate_ep_devices(&ps_bridge->aux);
/* port@1 is ps8640 output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
if (ret < 0)
return ret;
if (!panel)
return -ENODEV;
ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
ps_bridge->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);


@ -2830,7 +2830,7 @@ static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&hdmi->mutex);
/* Store the display mode for plugin/DKMS poweron events */
memcpy(&hdmi->previous_mode, mode, sizeof(hdmi->previous_mode));
drm_mode_copy(&hdmi->previous_mode, mode);
mutex_unlock(&hdmi->mutex);
}


@ -246,7 +246,6 @@ struct dw_mipi_dsi {
struct clk *pclk;
bool device_found;
unsigned int lane_mbps; /* per lane */
u32 channel;
u32 lanes;
@ -310,37 +309,12 @@ static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
return readl(dsi->base + reg);
}
static int dw_mipi_dsi_panel_or_bridge(struct dw_mipi_dsi *dsi,
struct device_node *node)
{
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
if (ret)
return ret;
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
dsi->panel_bridge = bridge;
if (!dsi->panel_bridge)
return -EPROBE_DEFER;
return 0;
}
static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
struct drm_bridge *bridge;
int ret;
if (device->lanes > dsi->plat_data->max_data_lanes) {
@ -354,13 +328,13 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
if (!dsi->device_found) {
ret = dw_mipi_dsi_panel_or_bridge(dsi, host->dev->of_node);
if (ret)
return ret;
bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
dsi->device_found = true;
}
dsi->panel_bridge = bridge;
drm_bridge_add(&dsi->bridge);
if (pdata->host_ops && pdata->host_ops->attach) {
ret = pdata->host_ops->attach(pdata->priv_data, device);
@ -1021,16 +995,6 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
/* Set the encoder type as caller does not know it */
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
if (!dsi->device_found) {
int ret;
ret = dw_mipi_dsi_panel_or_bridge(dsi, dsi->dev->of_node);
if (ret)
return ret;
dsi->device_found = true;
}
/* Attach the panel-bridge to the dsi bridge */
return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
flags);
@ -1217,7 +1181,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
#ifdef CONFIG_OF
dsi->bridge.of_node = pdev->dev.of_node;
#endif
drm_bridge_add(&dsi->bridge);
return dsi;
}

View file

@ -179,15 +179,8 @@ static int tc358762_parse_dt(struct tc358762 *ctx)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
struct drm_panel *panel;
int ret;
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret)
return ret;
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);


@ -16,14 +16,9 @@
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
@ -153,10 +148,9 @@ static const char * const tc358764_supplies[] = {
struct tc358764 {
struct device *dev;
struct drm_bridge bridge;
struct drm_connector connector;
struct drm_bridge *next_bridge;
struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
struct gpio_desc *gpio_reset;
struct drm_panel *panel;
int error;
};
@ -210,12 +204,6 @@ static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
return container_of(bridge, struct tc358764, bridge);
}
static inline
struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
{
return container_of(connector, struct tc358764, connector);
}
static int tc358764_init(struct tc358764 *ctx)
{
u32 v = 0;
@ -278,43 +266,11 @@ static void tc358764_reset(struct tc358764 *ctx)
usleep_range(1000, 2000);
}
static int tc358764_get_modes(struct drm_connector *connector)
{
struct tc358764 *ctx = connector_to_tc358764(connector);
return drm_panel_get_modes(ctx->panel, connector);
}
static const
struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
.get_modes = tc358764_get_modes,
};
static const struct drm_connector_funcs tc358764_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void tc358764_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
if (ret < 0)
dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
}
static void tc358764_post_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret;
ret = drm_panel_unprepare(ctx->panel);
if (ret < 0)
dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
tc358764_reset(ctx);
usleep_range(10000, 15000);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@ -335,72 +291,25 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
ret = tc358764_init(ctx);
if (ret < 0)
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
ret = drm_panel_prepare(ctx->panel);
if (ret < 0)
dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
}
static void tc358764_enable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret = drm_panel_enable(ctx->panel);
if (ret < 0)
dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
static int tc358764_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
struct drm_device *drm = bridge->dev;
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}
ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, &ctx->connector,
&tc358764_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
return ret;
}
drm_connector_helper_add(&ctx->connector,
&tc358764_connector_helper_funcs);
drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
ctx->connector.funcs->reset(&ctx->connector);
drm_connector_register(&ctx->connector);
return 0;
}
static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
drm_connector_unregister(&ctx->connector);
ctx->panel = NULL;
drm_connector_put(&ctx->connector);
return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
.disable = tc358764_disable,
.post_disable = tc358764_post_disable,
.enable = tc358764_enable,
.pre_enable = tc358764_pre_enable,
.attach = tc358764_attach,
.detach = tc358764_detach,
};
static int tc358764_parse_dt(struct tc358764 *ctx)
{
struct device *dev = ctx->dev;
int ret;
ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpio_reset)) {
@ -408,12 +317,11 @@ static int tc358764_parse_dt(struct tc358764 *ctx)
return PTR_ERR(ctx->gpio_reset);
}
ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
NULL);
if (ret && ret != -EPROBE_DEFER)
dev_err(dev, "cannot find panel (%d)\n", ret);
ctx->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(ctx->next_bridge))
return PTR_ERR(ctx->next_bridge);
return ret;
return 0;
}
static int tc358764_configure_regulators(struct tc358764 *ctx)


@ -1,6 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tc358767 eDP bridge driver
* TC358767/TC358867/TC9595 DSI/DPI-to-DPI/(e)DP bridge driver
*
* The TC358767/TC358867/TC9595 can operate in multiple modes.
* The following modes are supported:
* DPI->(e)DP -- supported
* DSI->DPI .... supported
* DSI->(e)DP .. NOT supported
*
* Copyright (C) 2016 CogentEmbedded Inc
* Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
@ -29,6 +35,7 @@
#include <drm/drm_bridge.h>
#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
@ -36,7 +43,35 @@
/* Registers */
/* Display Parallel Interface */
/* PPI layer registers */
#define PPI_STARTPPI 0x0104 /* START control bit */
#define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */
#define LPX_PERIOD 3
#define PPI_LANEENABLE 0x0134
#define PPI_TX_RX_TA 0x013c
#define TTA_GET 0x40000
#define TTA_SURE 6
#define PPI_D0S_ATMR 0x0144
#define PPI_D1S_ATMR 0x0148
#define PPI_D0S_CLRSIPOCOUNT 0x0164 /* Assertion timer for Lane 0 */
#define PPI_D1S_CLRSIPOCOUNT 0x0168 /* Assertion timer for Lane 1 */
#define PPI_D2S_CLRSIPOCOUNT 0x016c /* Assertion timer for Lane 2 */
#define PPI_D3S_CLRSIPOCOUNT 0x0170 /* Assertion timer for Lane 3 */
#define PPI_START_FUNCTION BIT(0)
/* DSI layer registers */
#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */
#define DSI_LANEENABLE 0x0210 /* Enables each lane */
#define DSI_RX_START BIT(0)
/* Lane enable PPI and DSI register bits */
#define LANEENABLE_CLEN BIT(0)
#define LANEENABLE_L0EN BIT(1)
#define LANEENABLE_L1EN BIT(2)
#define LANEENABLE_L2EN BIT(1)
#define LANEENABLE_L3EN BIT(2)
/* Display Parallel Input Interface */
#define DPIPXLFMT 0x0440
#define VS_POL_ACTIVE_LOW (1 << 10)
#define HS_POL_ACTIVE_LOW (1 << 9)
@ -48,6 +83,14 @@
#define DPI_BPP_RGB666 (1 << 0)
#define DPI_BPP_RGB565 (2 << 0)
/* Display Parallel Output Interface */
#define POCTRL 0x0448
#define POCTRL_S2P BIT(7)
#define POCTRL_PCLK_POL BIT(3)
#define POCTRL_VS_POL BIT(2)
#define POCTRL_HS_POL BIT(1)
#define POCTRL_DE_POL BIT(0)
/* Video Path */
#define VPCTRL0 0x0450
#define VSDELAY GENMASK(31, 20)
@ -247,6 +290,9 @@ struct tc_data {
struct drm_bridge *panel_bridge;
struct drm_connector connector;
struct mipi_dsi_device *dsi;
u8 dsi_lanes;
/* link settings */
struct tc_edp_link link;
@ -469,10 +515,24 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
int mul, best_mul = 1;
int delta, best_delta;
int ext_div[] = {1, 2, 3, 5, 7};
int clk_min, clk_max;
int best_pixelclock = 0;
int vco_hi = 0;
u32 pxl_pllparam;
/*
* refclk * mul / (ext_pre_div * pre_div) should be in range:
* - DPI ..... 0 to 100 MHz
* - (e)DP ... 150 to 650 MHz
*/
if (tc->bridge.type == DRM_MODE_CONNECTOR_DPI) {
clk_min = 0;
clk_max = 100000000;
} else {
clk_min = 150000000;
clk_max = 650000000;
}
dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
refclk);
best_delta = pixelclock;
@ -499,11 +559,7 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
continue;
clk = (refclk / ext_div[i_pre] / div) * mul;
/*
* refclk * mul / (ext_pre_div * pre_div)
* should be in the 150 to 650 MHz range
*/
if ((clk > 650000000) || (clk < 150000000))
if ((clk > clk_max) || (clk < clk_min))
continue;
clk = clk / ext_div[i_post];
@ -656,6 +712,12 @@ static int tc_aux_link_setup(struct tc_data *tc)
if (ret)
goto err;
/* Register DP AUX channel */
tc->aux.name = "TC358767 AUX i2c adapter";
tc->aux.dev = tc->dev;
tc->aux.transfer = tc_aux_transfer;
drm_dp_aux_init(&tc->aux);
return 0;
err:
dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
@ -728,33 +790,16 @@ static int tc_get_display_props(struct tc_data *tc)
return ret;
}
static int tc_set_video_mode(struct tc_data *tc,
const struct drm_display_mode *mode)
static int tc_set_common_video_mode(struct tc_data *tc,
const struct drm_display_mode *mode)
{
int ret;
int vid_sync_dly;
int max_tu_symbol;
int left_margin = mode->htotal - mode->hsync_end;
int right_margin = mode->hsync_start - mode->hdisplay;
int hsync_len = mode->hsync_end - mode->hsync_start;
int upper_margin = mode->vtotal - mode->vsync_end;
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
u32 dp0_syncval;
u32 bits_per_pixel = 24;
u32 in_bw, out_bw;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
* DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
* (output active video bandwidth in bytes))
* Must be less than tu_size.
*/
in_bw = mode->clock * bits_per_pixel / 8;
out_bw = tc->link.num_lanes * tc->link.rate;
max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
int ret;
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
@ -812,8 +857,49 @@ static int tc_set_video_mode(struct tc_data *tc,
FIELD_PREP(COLOR_B, 99) |
ENI2CFILTER |
FIELD_PREP(COLOR_BAR_MODE, COLOR_BAR_MODE_BARS));
if (ret)
return ret;
return ret;
}
static int tc_set_dpi_video_mode(struct tc_data *tc,
const struct drm_display_mode *mode)
{
u32 value = POCTRL_S2P;
if (tc->mode.flags & DRM_MODE_FLAG_NHSYNC)
value |= POCTRL_HS_POL;
if (tc->mode.flags & DRM_MODE_FLAG_NVSYNC)
value |= POCTRL_VS_POL;
return regmap_write(tc->regmap, POCTRL, value);
}
static int tc_set_edp_video_mode(struct tc_data *tc,
const struct drm_display_mode *mode)
{
int ret;
int vid_sync_dly;
int max_tu_symbol;
int left_margin = mode->htotal - mode->hsync_end;
int hsync_len = mode->hsync_end - mode->hsync_start;
int upper_margin = mode->vtotal - mode->vsync_end;
int vsync_len = mode->vsync_end - mode->vsync_start;
u32 dp0_syncval;
u32 bits_per_pixel = 24;
u32 in_bw, out_bw;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
* DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
* (output active video bandwidth in bytes))
* Must be less than tu_size.
*/
in_bw = mode->clock * bits_per_pixel / 8;
out_bw = tc->link.num_lanes * tc->link.rate;
max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
@ -863,10 +949,7 @@ static int tc_set_video_mode(struct tc_data *tc,
FIELD_PREP(MAX_TU_SYMBOL, max_tu_symbol) |
FIELD_PREP(TU_SIZE, TU_SIZE_RECOMMENDED) |
BPC_8);
if (ret)
return ret;
return 0;
return ret;
}
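
As a rough worked example of the max_tu_symbol formula (numbers are illustrative, not from this series): a 1080p60 input with a 148500 kHz pixel clock at 24 bpp gives in_bw = 148500 * 24 / 8 = 445500 kB/s, and two lanes at HBR (270000 kB/s per lane) give out_bw = 540000 kB/s, so max_tu_symbol comes out at roughly 83 % of the transfer-unit size, about 52-53 symbols, safely below tu_size as the comment requires.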
static int tc_wait_link_training(struct tc_data *tc)
@ -1164,7 +1247,86 @@ static int tc_main_link_disable(struct tc_data *tc)
return regmap_write(tc->regmap, DP0CTL, 0);
}
static int tc_stream_enable(struct tc_data *tc)
static int tc_dpi_stream_enable(struct tc_data *tc)
{
int ret;
u32 value;
dev_dbg(tc->dev, "enable video stream\n");
/* Setup PLL */
ret = tc_set_syspllparam(tc);
if (ret)
return ret;
/*
* Initially PLLs are in bypass. Force PLL parameter update,
* disable PLL bypass, enable PLL
*/
ret = tc_pllupdate(tc, DP0_PLLCTRL);
if (ret)
return ret;
ret = tc_pllupdate(tc, DP1_PLLCTRL);
if (ret)
return ret;
/* Pixel PLL must always be enabled for DPI mode */
ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
1000 * tc->mode.clock);
if (ret)
return ret;
regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 3);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
regmap_write(tc->regmap, PPI_LPTXTIMECNT, LPX_PERIOD);
value = ((LANEENABLE_L0EN << tc->dsi_lanes) - LANEENABLE_L0EN) |
LANEENABLE_CLEN;
regmap_write(tc->regmap, PPI_LANEENABLE, value);
regmap_write(tc->regmap, DSI_LANEENABLE, value);
ret = tc_set_common_video_mode(tc, &tc->mode);
if (ret)
return ret;
ret = tc_set_dpi_video_mode(tc, &tc->mode);
if (ret)
return ret;
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
else
value |= DP0_VIDSRC_DSI_RX;
ret = regmap_write(tc->regmap, SYSCTRL, value);
if (ret)
return ret;
usleep_range(120, 150);
regmap_write(tc->regmap, PPI_STARTPPI, PPI_START_FUNCTION);
regmap_write(tc->regmap, DSI_STARTDSI, DSI_RX_START);
return 0;
}
static int tc_dpi_stream_disable(struct tc_data *tc)
{
dev_dbg(tc->dev, "disable video stream\n");
tc_pxl_pll_dis(tc);
return 0;
}
static int tc_edp_stream_enable(struct tc_data *tc)
{
int ret;
u32 value;
@ -1179,7 +1341,11 @@ static int tc_stream_enable(struct tc_data *tc)
return ret;
}
ret = tc_set_video_mode(tc, &tc->mode);
ret = tc_set_common_video_mode(tc, &tc->mode);
if (ret)
return ret;
ret = tc_set_edp_video_mode(tc, &tc->mode);
if (ret)
return ret;
@ -1219,7 +1385,7 @@ static int tc_stream_enable(struct tc_data *tc)
return 0;
}
static int tc_stream_disable(struct tc_data *tc)
static int tc_edp_stream_disable(struct tc_data *tc)
{
int ret;
@ -1234,7 +1400,37 @@ static int tc_stream_disable(struct tc_data *tc)
return 0;
}
static void tc_bridge_enable(struct drm_bridge *bridge)
static void
tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
ret = tc_dpi_stream_enable(tc);
if (ret < 0) {
dev_err(tc->dev, "main link stream start error: %d\n", ret);
tc_main_link_disable(tc);
return;
}
}
static void
tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
ret = tc_dpi_stream_disable(tc);
if (ret < 0)
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
}
static void
tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
@ -1251,7 +1447,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
return;
}
ret = tc_stream_enable(tc);
ret = tc_edp_stream_enable(tc);
if (ret < 0) {
dev_err(tc->dev, "main link stream start error: %d\n", ret);
tc_main_link_disable(tc);
@ -1259,12 +1455,14 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
}
}
static void tc_bridge_disable(struct drm_bridge *bridge)
static void
tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct tc_data *tc = bridge_to_tc(bridge);
int ret;
ret = tc_stream_disable(tc);
ret = tc_edp_stream_disable(tc);
if (ret < 0)
dev_err(tc->dev, "main link stream stop error: %d\n", ret);
@ -1285,9 +1483,57 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
return true;
}
static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
static int tc_common_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
const unsigned int max_khz)
{
tc_bridge_mode_fixup(bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (crtc_state->adjusted_mode.clock > max_khz)
return -EINVAL;
return 0;
}
static int tc_dpi_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
/* DSI->DPI interface clock limitation: up to 100 MHz */
return tc_common_atomic_check(bridge, bridge_state, crtc_state,
conn_state, 100000);
}
static int tc_edp_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
/* DPI->(e)DP interface clock limitation: up to 154 MHz */
return tc_common_atomic_check(bridge, bridge_state, crtc_state,
conn_state, 154000);
}
static enum drm_mode_status
tc_dpi_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
/* DPI interface clock limitation: up to 100 MHz */
if (mode->clock > 100000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static enum drm_mode_status
tc_edp_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct tc_data *tc = bridge_to_tc(bridge);
u32 req, avail;
@ -1312,7 +1558,7 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
{
struct tc_data *tc = bridge_to_tc(bridge);
tc->mode = *mode;
drm_mode_copy(&tc->mode, mode);
}
static struct edid *tc_get_edid(struct drm_bridge *bridge,
@ -1395,8 +1641,20 @@ static const struct drm_connector_funcs tc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int tc_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
static int tc_dpi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct tc_data *tc = bridge_to_tc(bridge);
if (!tc->panel_bridge)
return 0;
return drm_bridge_attach(tc->bridge.encoder, tc->panel_bridge,
&tc->bridge, flags);
}
static int tc_edp_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct tc_data *tc = bridge_to_tc(bridge);
@ -1448,21 +1706,64 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
return ret;
}
static void tc_bridge_detach(struct drm_bridge *bridge)
static void tc_edp_bridge_detach(struct drm_bridge *bridge)
{
drm_dp_aux_unregister(&bridge_to_tc(bridge)->aux);
}
static const struct drm_bridge_funcs tc_bridge_funcs = {
.attach = tc_bridge_attach,
.detach = tc_bridge_detach,
.mode_valid = tc_mode_valid,
#define MAX_INPUT_SEL_FORMATS 1
static u32 *
tc_dpi_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts;
*num_input_fmts = 0;
input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
/* This is the DSI-end bus format */
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
*num_input_fmts = 1;
return input_fmts;
}
static const struct drm_bridge_funcs tc_dpi_bridge_funcs = {
.attach = tc_dpi_bridge_attach,
.mode_valid = tc_dpi_mode_valid,
.mode_set = tc_bridge_mode_set,
.enable = tc_bridge_enable,
.disable = tc_bridge_disable,
.atomic_check = tc_dpi_atomic_check,
.atomic_enable = tc_dpi_bridge_atomic_enable,
.atomic_disable = tc_dpi_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_get_input_bus_fmts = tc_dpi_atomic_get_input_bus_fmts,
};
static const struct drm_bridge_funcs tc_edp_bridge_funcs = {
.attach = tc_edp_bridge_attach,
.detach = tc_edp_bridge_detach,
.mode_valid = tc_edp_mode_valid,
.mode_set = tc_bridge_mode_set,
.atomic_check = tc_edp_atomic_check,
.atomic_enable = tc_edp_bridge_atomic_enable,
.atomic_disable = tc_edp_bridge_atomic_disable,
.mode_fixup = tc_bridge_mode_fixup,
.detect = tc_bridge_detect,
.get_edid = tc_get_edid,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
};
static bool tc_readable_reg(struct device *dev, unsigned int reg)
@ -1549,18 +1850,87 @@ static irqreturn_t tc_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
static int tc_mipi_dsi_host_attach(struct tc_data *tc)
{
struct device *dev = &client->dev;
struct device *dev = tc->dev;
struct device_node *host_node;
struct device_node *endpoint;
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
const struct mipi_dsi_device_info info = {
.type = "tc358767",
.channel = 0,
.node = NULL,
};
int dsi_lanes, ret;
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
host_node = of_graph_get_remote_port_parent(endpoint);
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
of_node_put(endpoint);
if (dsi_lanes < 0 || dsi_lanes > 4)
return -EINVAL;
if (!host)
return -EPROBE_DEFER;
dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi))
return dev_err_probe(dev, PTR_ERR(dsi),
"failed to create dsi device\n");
tc->dsi = dsi;
tc->dsi_lanes = dsi_lanes;
dsi->lanes = tc->dsi_lanes;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
dev_err(dev, "failed to attach dsi to host: %d\n", ret);
return ret;
}
return 0;
}
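
Attaching as a DSI peripheral this way means probing naturally defers with -EPROBE_DEFER until the DSI host driver referenced through port@0 has registered; the lane count comes straight from the data-lanes property of that endpoint.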
static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc)
{
struct device *dev = tc->dev;
struct drm_panel *panel;
struct tc_data *tc;
int ret;
tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
if (!tc)
return -ENOMEM;
/* port@1 is the DPI input/output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret && ret != -ENODEV)
return ret;
tc->dev = dev;
if (panel) {
struct drm_bridge *panel_bridge;
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
tc->panel_bridge = panel_bridge;
tc->bridge.type = DRM_MODE_CONNECTOR_DPI;
tc->bridge.funcs = &tc_dpi_bridge_funcs;
return 0;
}
return ret;
}
static int tc_probe_edp_bridge_endpoint(struct tc_data *tc)
{
struct device *dev = tc->dev;
struct drm_panel *panel;
int ret;
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL);
@ -1580,6 +1950,74 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
}
tc->bridge.funcs = &tc_edp_bridge_funcs;
if (tc->hpd_pin >= 0)
tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
return ret;
}
static int tc_probe_bridge_endpoint(struct tc_data *tc)
{
struct device *dev = tc->dev;
struct of_endpoint endpoint;
struct device_node *node = NULL;
const u8 mode_dpi_to_edp = BIT(1) | BIT(2);
const u8 mode_dsi_to_edp = BIT(0) | BIT(2);
const u8 mode_dsi_to_dpi = BIT(0) | BIT(1);
u8 mode = 0;
/*
* Determine bridge configuration.
*
* Port allocation:
* port@0 - DSI input
* port@1 - DPI input/output
* port@2 - eDP output
*
* Possible connections:
* DPI -> port@1 -> port@2 -> eDP :: [port@0 is not connected]
* DSI -> port@0 -> port@2 -> eDP :: [port@1 is not connected]
* DSI -> port@0 -> port@1 -> DPI :: [port@2 is not connected]
*/
for_each_endpoint_of_node(dev->of_node, node) {
of_graph_parse_endpoint(node, &endpoint);
if (endpoint.port > 2)
return -EINVAL;
mode |= BIT(endpoint.port);
}
if (mode == mode_dpi_to_edp)
return tc_probe_edp_bridge_endpoint(tc);
else if (mode == mode_dsi_to_dpi)
return tc_probe_dpi_bridge_endpoint(tc);
else if (mode == mode_dsi_to_edp)
dev_warn(dev, "The mode DSI-to-(e)DP is not supported!\n");
else
dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode);
return -EINVAL;
}
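
For example, a device tree that only wires up port@0 (DSI in) and port@1 (DPI out) yields mode = BIT(0) | BIT(1) = 0x3, matching mode_dsi_to_dpi and selecting the DPI bridge ops, while the classic DPI-to-(e)DP board populates port@1 and port@2 and ends up with mode = 0x6.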
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct tc_data *tc;
int ret;
tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
if (!tc)
return -ENOMEM;
tc->dev = dev;
ret = tc_probe_bridge_endpoint(tc);
if (ret)
return ret;
/* Shut down GPIO is optional */
tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
if (IS_ERR(tc->sd_gpio))
@ -1686,26 +2124,25 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
ret = tc_aux_link_setup(tc);
if (ret)
return ret;
/* Register DP AUX channel */
tc->aux.name = "TC358767 AUX i2c adapter";
tc->aux.dev = tc->dev;
tc->aux.transfer = tc_aux_transfer;
drm_dp_aux_init(&tc->aux);
tc->bridge.funcs = &tc_bridge_funcs;
if (tc->hpd_pin >= 0)
tc->bridge.ops |= DRM_BRIDGE_OP_DETECT;
tc->bridge.ops |= DRM_BRIDGE_OP_EDID;
if (tc->bridge.type != DRM_MODE_CONNECTOR_DPI) { /* (e)DP output */
ret = tc_aux_link_setup(tc);
if (ret)
return ret;
}
tc->bridge.of_node = dev->of_node;
drm_bridge_add(&tc->bridge);
i2c_set_clientdata(client, tc);
if (tc->bridge.type == DRM_MODE_CONNECTOR_DPI) { /* DPI output */
ret = tc_mipi_dsi_host_attach(tc);
if (ret) {
drm_bridge_remove(&tc->bridge);
return ret;
}
}
return 0;
}


@ -649,7 +649,6 @@ static int tc_attach_host(struct tc_data *tc)
static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct drm_panel *panel;
struct tc_data *tc;
int ret;
@ -660,14 +659,8 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->dev = dev;
tc->i2c = client;
ret = drm_of_find_panel_or_bridge(dev->of_node, TC358775_LVDS_OUT0,
0, &panel, NULL);
if (ret < 0)
return ret;
if (!panel)
return -ENODEV;
tc->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
tc->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node,
TC358775_LVDS_OUT0, 0);
if (IS_ERR(tc->panel_bridge))
return PTR_ERR(tc->panel_bridge);


@ -488,6 +488,11 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
/* Clear all errors that got asserted during initialization. */
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
regmap_write(ctx->regmap, REG_IRQ_STAT, pval);
usleep_range(10000, 12000);
regmap_read(ctx->regmap, REG_IRQ_STAT, &pval);
if (pval)
dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval);
}
static void sn65dsi83_atomic_disable(struct drm_bridge *bridge,
@ -565,7 +570,6 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
struct device_node *endpoint;
struct drm_panel *panel;
int ret;
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
@ -605,15 +609,10 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
}
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
if (ret < 0)
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
if (IS_ERR(panel_bridge)) {
ret = PTR_ERR(panel_bridge);
goto err_put_node;
if (panel) {
panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(panel_bridge)) {
ret = PTR_ERR(panel_bridge);
goto err_put_node;
}
}
ctx->panel_bridge = panel_bridge;


@ -1188,15 +1188,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
struct device_node *np = pdata->dev->of_node;
struct drm_panel *panel;
int ret;
ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
if (ret)
return dev_err_probe(&adev->dev, ret,
"could not find any panel node\n");
pdata->next_bridge = devm_drm_panel_bridge_add(pdata->dev, panel);
pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
if (IS_ERR(pdata->next_bridge)) {
DRM_ERROR("failed to create panel bridge\n");
return PTR_ERR(pdata->next_bridge);


@ -789,6 +789,8 @@ drm_atomic_private_obj_init(struct drm_device *dev,
obj->state = state;
obj->funcs = funcs;
list_add_tail(&obj->head, &dev->mode_config.privobj_list);
state->obj = obj;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);
@ -1423,8 +1425,12 @@ EXPORT_SYMBOL(drm_atomic_check_only);
int drm_atomic_commit(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_printer p = drm_info_printer(state->dev->dev);
int ret;
if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_new_state(state, &p);
ret = drm_atomic_check_only(state);
if (ret)
return ret;
@ -1632,6 +1638,15 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
}
EXPORT_SYMBOL(__drm_atomic_helper_set_config);
static void drm_atomic_private_obj_print_state(struct drm_printer *p,
const struct drm_private_state *state)
{
struct drm_private_obj *obj = state->obj;
if (obj->funcs->atomic_print_state)
obj->funcs->atomic_print_state(p, state);
}
/**
* drm_atomic_print_new_state - prints drm atomic state
* @state: atomic configuration to check
@ -1652,6 +1667,8 @@ void drm_atomic_print_new_state(const struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct drm_private_obj *obj;
struct drm_private_state *obj_state;
int i;
if (!p) {
@ -1669,6 +1686,9 @@ void drm_atomic_print_new_state(const struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, connector_state, i)
drm_atomic_connector_print_state(p, connector_state);
for_each_new_private_obj_in_state(state, obj, obj_state, i)
drm_atomic_private_obj_print_state(p, obj_state);
}
EXPORT_SYMBOL(drm_atomic_print_new_state);
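
A driver-private object opts into the new dump by filling the atomic_print_state hook of its drm_private_state_funcs. A hypothetical sketch (the foo_* names are made up; only the callback signature is taken from the code above, and foo_obj_state is assumed to embed struct drm_private_state as its base member):

static void foo_obj_print_state(struct drm_printer *p,
				const struct drm_private_state *state)
{
	const struct foo_obj_state *foo_state =
		container_of(state, struct foo_obj_state, base);

	drm_printf(p, "\tfoo-obj: bandwidth=%u kB/s\n", foo_state->bandwidth);
}

static const struct drm_private_state_funcs foo_obj_state_funcs = {
	.atomic_duplicate_state = foo_obj_duplicate_state,
	.atomic_destroy_state = foo_obj_destroy_state,
	.atomic_print_state = foo_obj_print_state,
};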


@ -1328,7 +1328,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_out_fence_state *fence_state;
int ret = 0;
unsigned int i, j, num_fences;
struct drm_printer p = drm_info_printer(dev->dev);
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@ -1460,9 +1459,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_nonblocking_commit(state);
} else {
if (drm_debug_enabled(DRM_UT_STATE))
drm_atomic_print_new_state(state, &p);
ret = drm_atomic_commit(state);
}
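
Together with the drm_atomic.c hunk above, this moves the DRM_UT_STATE dump out of the blocking branch of the ATOMIC ioctl and into drm_atomic_commit() itself, so every blocking commit, in-kernel clients included, now prints the new state, private objects included thanks to the new helper.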


@ -317,7 +317,7 @@ EXPORT_SYMBOL(drm_plane_create_rotation_property);
* DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
* DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_Y);
*
* to eliminate the DRM_MODE_ROTATE_X flag. Depending on what kind of
* to eliminate the DRM_MODE_REFLECT_X flag. Depending on what kind of
* transforms the hardware supports, this function may not
* be able to produce a supported transform, so the caller should
* check the result afterwards.


@ -384,8 +384,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc);
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
if (bridge_connector->bridge_hpd) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_bridge_connector_enable_hpd(connector);
}
else if (bridge_connector->bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;

(diff omitted: file too large)


@ -594,37 +594,26 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for
}
EXPORT_SYMBOL(drm_fb_blit_toio);
static void drm_fb_gray8_to_mono_reversed_line(u8 *dst, const u8 *src, unsigned int pixels,
unsigned int start_offset, unsigned int end_len)
static void drm_fb_gray8_to_mono_line(u8 *dst, const u8 *src, unsigned int pixels)
{
unsigned int xb, i;
while (pixels) {
unsigned int i, bits = min(pixels, 8U);
u8 byte = 0;
for (xb = 0; xb < pixels; xb++) {
unsigned int start = 0, end = 8;
u8 byte = 0x00;
if (xb == 0 && start_offset)
start = start_offset;
if (xb == pixels - 1 && end_len)
end = end_len;
for (i = start; i < end; i++) {
unsigned int x = xb * 8 + i;
byte >>= 1;
if (src[x] >> 7)
byte |= BIT(7);
for (i = 0; i < bits; i++, pixels--) {
if (*src++ >= 128)
byte |= BIT(i);
}
*dst++ = byte;
}
}
/**
* drm_fb_xrgb8888_to_mono_reversed - Convert XRGB8888 to reversed monochrome
* @dst: reversed monochrome destination buffer
* drm_fb_xrgb8888_to_mono - Convert XRGB8888 to monochrome
* @dst: monochrome destination buffer (0=black, 1=white)
* @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @src: XRGB8888 source buffer
* @vaddr: XRGB8888 source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
*
@ -633,17 +622,23 @@ static void drm_fb_gray8_to_mono_reversed_line(u8 *dst, const u8 *src, unsigned
* and use this function to convert to the native format.
*
* This function uses drm_fb_xrgb8888_to_gray8() to convert to grayscale and
* then the result is converted from grayscale to reversed monohrome.
* then the result is converted from grayscale to monochrome.
*
* The first pixel (upper left corner of the clip rectangle) will be converted
* and copied to the first bit (LSB) in the first byte of the monochrome
* destination buffer.
* If the caller requires that the first pixel in a byte must be located at an
* x-coordinate that is a multiple of 8, then the caller must take care itself
* of supplying a suitable clip rectangle.
*/
void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *vaddr,
const struct drm_framebuffer *fb, const struct drm_rect *clip)
void drm_fb_xrgb8888_to_mono(void *dst, unsigned int dst_pitch, const void *vaddr,
const struct drm_framebuffer *fb, const struct drm_rect *clip)
{
unsigned int linepixels = drm_rect_width(clip);
unsigned int lines = clip->y2 - clip->y1;
unsigned int lines = drm_rect_height(clip);
unsigned int cpp = fb->format->cpp[0];
unsigned int len_src32 = linepixels * cpp;
struct drm_device *dev = fb->dev;
unsigned int start_offset, end_len;
unsigned int y;
u8 *mono = dst, *gray8;
u32 *src32;
@ -652,21 +647,18 @@ void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const v
return;
/*
* The reversed mono destination buffer contains 1 bit per pixel
* and destination scanlines have to be in multiple of 8 pixels.
* The mono destination buffer contains 1 bit per pixel
*/
if (!dst_pitch)
dst_pitch = DIV_ROUND_UP(linepixels, 8);
drm_WARN_ONCE(dev, dst_pitch % 8 != 0, "dst_pitch is not a multiple of 8\n");
/*
* The cma memory is write-combined so reads are uncached.
* Speed up by fetching one line at a time.
*
* Also, format conversion from XR24 to reversed monochrome
* are done line-by-line but are converted to 8-bit grayscale
* as an intermediate step.
* Also, format conversion from XR24 to monochrome are done
* line-by-line but are converted to 8-bit grayscale as an
* intermediate step.
*
* Allocate a buffer to be used for both copying from the cma
* memory and to store the intermediate grayscale line pixels.
@ -677,27 +669,15 @@ void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const v
gray8 = (u8 *)src32 + len_src32;
/*
* For damage handling, it is possible that only parts of the source
* buffer is copied and this could lead to start and end pixels that
* are not aligned to multiple of 8.
*
* Calculate if the start and end pixels are not aligned and set the
* offsets for the reversed mono line conversion function to adjust.
*/
start_offset = clip->x1 % 8;
end_len = clip->x2 % 8;
vaddr += clip_offset(clip, fb->pitches[0], cpp);
for (y = 0; y < lines; y++) {
src32 = memcpy(src32, vaddr, len_src32);
drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
drm_fb_gray8_to_mono_reversed_line(mono, gray8, dst_pitch,
start_offset, end_len);
drm_fb_gray8_to_mono_line(mono, gray8, linepixels);
vaddr += fb->pitches[0];
mono += dst_pitch;
}
kfree(src32);
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono_reversed);
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
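
A hedged sketch of how a monochrome display driver might call the renamed helper from its damage handler; foo_fb_dirty() and foo_panel_flush() are placeholders:

  static void foo_fb_dirty(struct drm_framebuffer *fb, const void *vaddr,
                           struct drm_rect *clip, u8 *mono)
  {
          /* dst_pitch == 0 lets the helper derive the destination pitch
           * from the clip width, as the code above does. */
          drm_fb_xrgb8888_to_mono(mono, 0, vaddr, fb, clip);

          foo_panel_flush(fb->dev, mono, clip);
  }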


@ -1273,83 +1273,3 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
/**
* drm_gem_fence_array_add - Adds the fence to an array of fences to be
* waited on, deduplicating fences from the same context.
*
* @fence_array: array of dma_fence * for the job to block on.
* @fence: the dma_fence to add to the list of dependencies.
*
* This functions consumes the reference for @fence both on success and error
* cases.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
*/
int drm_gem_fence_array_add(struct xarray *fence_array,
struct dma_fence *fence)
{
struct dma_fence *entry;
unsigned long index;
u32 id = 0;
int ret;
if (!fence)
return 0;
/* Deduplicate if we already depend on a fence from the same context.
* This lets the size of the array of deps scale with the number of
* engines involved, rather than the number of BOs.
*/
xa_for_each(fence_array, index, entry) {
if (entry->context != fence->context)
continue;
if (dma_fence_is_later(fence, entry)) {
dma_fence_put(entry);
xa_store(fence_array, index, fence, GFP_KERNEL);
} else {
dma_fence_put(fence);
}
return 0;
}
ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
if (ret != 0)
dma_fence_put(fence);
return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);
/**
* drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
* in the GEM object's reservation object to an array of dma_fences for use in
* scheduling a rendering job.
*
* This should be called after drm_gem_lock_reservations() on your array of
* GEM objects used in the job but before updating the reservations with your
* own fences.
*
* @fence_array: array of dma_fence * for the job to block on.
* @obj: the gem object to add new dependencies from.
* @write: whether the job might write the object (so we need to depend on
* shared fences in the reservation object).
*/
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
struct drm_gem_object *obj,
bool write)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
int ret = 0;
dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
ret = drm_gem_fence_array_add(fence_array, fence);
if (ret)
break;
}
return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
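
The two helpers removed above had their remaining users converted to the scheduler's own dependency tracking (see the etnaviv hunks below). A hedged sketch of the replacement pattern:

  static int foo_job_add_deps(struct drm_sched_job *job,
                              struct drm_gem_object *obj, bool write)
  {
          /* Pulls the object's implicit fences straight into the job,
           * which is what drm_gem_fence_array_add_implicit() used to
           * feed into a driver-owned xarray. */
          return drm_sched_job_add_implicit_dependencies(job, obj, write);
  }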


@ -143,25 +143,21 @@
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
struct dma_resv_iter cursor;
struct drm_gem_object *obj;
struct dma_fence *fence;
int ret;
if (!state->fb)
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
dma_resv_iter_begin(&cursor, obj->resv, false);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
/* TODO: Currently there should be only one write fence, so this
* here works fine. But drm_atomic_set_fence_for_plane() should
* be changed to be able to handle more fences in general for
* multiple BOs per fb anyway. */
dma_fence_get(fence);
break;
}
dma_resv_iter_end(&cursor);
ret = dma_resv_get_singleton(obj->resv, false, &fence);
if (ret)
return ret;
/* TODO: drm_atomic_set_fence_for_plane() should be changed to be able
* to handle more fences in general for multiple BOs per fb.
*/
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
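
dma_resv_get_singleton() as used above returns a single fence, possibly a merged dma_fence_array internally, with a reference the caller owns; here that reference is handed straight to drm_atomic_set_fence_for_plane(). A hedged sketch of a caller that keeps the reference itself:

  static int foo_wait_idle(struct drm_gem_object *obj)
  {
          struct dma_fence *fence;
          int ret;

          ret = dma_resv_get_singleton(obj->resv, false, &fence);
          if (ret)
                  return ret;

          /* fence may be NULL when the reservation holds no fences. */
          if (fence) {
                  dma_fence_wait(fence, false);
                  dma_fence_put(fence);
          }

          return 0;
  }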


@ -867,7 +867,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
if (!tt)
return NULL;
ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
ret = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
if (ret < 0)
goto err_ttm_tt_init;


@ -941,6 +941,23 @@ void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *
}
EXPORT_SYMBOL(drm_mode_copy);
/**
* drm_mode_init - initialize the mode from another mode
* @dst: mode to overwrite
* @src: mode to copy
*
* Copy an existing mode into another mode, zeroing the
* list head of the destination mode. Typically used
* to guarantee the list head is not left with stack
* garbage in on-stack modes.
*/
void drm_mode_init(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
memset(dst, 0, sizeof(*dst));
drm_mode_copy(dst, src);
}
EXPORT_SYMBOL(drm_mode_init);
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @dev: drm_device to allocate the duplicated mode for
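
drm_mode_init(), added above, exists to keep on-stack mode copies from carrying a bogus list head: a plain struct assignment copies the source's list head, while drm_mode_copy() alone preserves whatever stack garbage is already in the destination's head. A short sketch with hypothetical foo_* names:

  static void foo_program_timings(struct foo_device *fdev,
                                  const struct drm_crtc_state *crtc_state)
  {
          struct drm_display_mode mode;

          /* Zeroes mode (including its list head) before copying, so the
           * on-stack copy never points into a real mode list. */
          drm_mode_init(&mode, &crtc_state->adjusted_mode);

          foo_hw_set_timings(fdev, &mode);
  }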


@ -80,9 +80,6 @@ struct etnaviv_gem_submit_bo {
u64 va;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
struct dma_fence *excl;
unsigned int nr_shared;
struct dma_fence **shared;
};
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@ -95,7 +92,7 @@ struct etnaviv_gem_submit {
struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
struct dma_fence *out_fence, *in_fence;
struct dma_fence *out_fence;
int out_fence_id;
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;


@ -179,24 +179,18 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
struct dma_resv *robj = bo->obj->base.resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
ret = dma_resv_reserve_shared(robj, 1);
if (ret)
return ret;
}
ret = dma_resv_reserve_fences(robj, 1);
if (ret)
return ret;
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
ret = dma_resv_get_fences(robj, true, &bo->nr_shared,
&bo->shared);
if (ret)
return ret;
} else {
bo->excl = dma_fence_get(dma_resv_excl_fence(robj));
}
ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
&bo->obj->base,
bo->flags & ETNA_SUBMIT_BO_WRITE);
if (ret)
return ret;
}
return ret;
@ -402,8 +396,6 @@ static void submit_cleanup(struct kref *kref)
wake_up_all(&submit->gpu->fence_event);
if (submit->in_fence)
dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
mutex_lock(&submit->gpu->fence_lock);
@ -534,58 +526,69 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
ALIGN(args->stream_size, 8) + 8);
if (ret)
goto err_submit_objects;
goto err_submit_put;
submit->ctx = file->driver_priv;
submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
submit->exec_state = args->exec_state;
submit->flags = args->flags;
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
submit->ctx);
if (ret)
goto err_submit_put;
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
goto err_submit_job;
if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
goto err_submit_objects;
goto err_submit_job;
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
submit->in_fence = sync_file_get_fence(args->fence_fd);
if (!submit->in_fence) {
struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
ret = -EINVAL;
goto err_submit_objects;
goto err_submit_job;
}
ret = drm_sched_job_add_dependency(&submit->sched_job,
in_fence);
if (ret)
goto err_submit_job;
}
ret = submit_pin_objects(submit);
if (ret)
goto err_submit_objects;
goto err_submit_job;
ret = submit_reloc(submit, stream, args->stream_size / 4,
relocs, args->nr_relocs);
if (ret)
goto err_submit_objects;
goto err_submit_job;
ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
if (ret)
goto err_submit_objects;
goto err_submit_job;
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
ret = submit_lock_objects(submit, &ticket);
if (ret)
goto err_submit_objects;
goto err_submit_job;
ret = submit_fence_sync(submit);
if (ret)
goto err_submit_objects;
goto err_submit_job;
ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
ret = etnaviv_sched_push_job(submit);
if (ret)
goto err_submit_objects;
goto err_submit_job;
submit_attach_object_fences(submit);
@ -599,7 +602,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
sync_file = sync_file_create(submit->out_fence);
if (!sync_file) {
ret = -ENOMEM;
goto err_submit_objects;
goto err_submit_job;
}
fd_install(out_fence_fd, sync_file->file);
}
@ -607,7 +610,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence_fd = out_fence_fd;
args->fence = submit->out_fence_id;
err_submit_objects:
err_submit_job:
drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
etnaviv_submit_put(submit);
err_submit_ww_acquire:


@ -17,58 +17,6 @@ module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct dma_fence *fence;
int i;
if (unlikely(submit->in_fence)) {
fence = submit->in_fence;
submit->in_fence = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
int j;
if (bo->excl) {
fence = bo->excl;
bo->excl = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
for (j = 0; j < bo->nr_shared; j++) {
if (!bo->shared[j])
continue;
fence = bo->shared[j];
bo->shared[j] = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
kfree(bo->shared);
bo->nr_shared = 0;
bo->shared = NULL;
}
return NULL;
}
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
@ -142,29 +90,22 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
.dependency = etnaviv_sched_dependency,
.run_job = etnaviv_sched_run_job,
.timedout_job = etnaviv_sched_timedout_job,
.free_job = etnaviv_sched_free_job,
};
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit)
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
int ret = 0;
/*
* Hold the fence lock across the whole operation to avoid jobs being
* pushed out of order with regard to their sched fence seqnos as
* allocated in drm_sched_job_init.
* allocated in drm_sched_job_arm.
*/
mutex_lock(&submit->gpu->fence_lock);
ret = drm_sched_job_init(&submit->sched_job, sched_entity,
submit->ctx);
if (ret)
goto out_unlock;
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
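
The conversion above follows the drm_sched_job_init()/drm_sched_job_arm() split: init early in the ioctl, add dependencies, then arm (which assigns the scheduler fence seqno) only once the job is certain to be pushed. A condensed, hedged sketch of that ordering with generic names:

  static int foo_submit(struct drm_sched_job *job,
                        struct drm_sched_entity *entity,
                        struct dma_fence *in_fence, void *owner)
  {
          int ret;

          ret = drm_sched_job_init(job, entity, owner);
          if (ret)
                  return ret;

          if (in_fence) {
                  /* Consumes the in_fence reference on success and error. */
                  ret = drm_sched_job_add_dependency(job, in_fence);
                  if (ret)
                          goto err_cleanup;
          }

          drm_sched_job_arm(job);
          drm_sched_entity_push_job(job);

          return 0;

  err_cleanup:
          drm_sched_job_cleanup(job);
          return ret;
  }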


@ -18,7 +18,6 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
int etnaviv_sched_init(struct etnaviv_gpu *gpu);
void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit);
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);
#endif /* __ETNAVIV_SCHED_H__ */


@ -24,9 +24,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@ -253,9 +251,7 @@ struct exynos_dsi_driver_data {
struct exynos_dsi {
struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
struct drm_panel *panel;
struct list_head bridge_chain;
struct drm_bridge bridge;
struct drm_bridge *out_bridge;
struct device *dev;
struct drm_display_mode mode;
@ -285,11 +281,10 @@ struct exynos_dsi {
};
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
static inline struct exynos_dsi *encoder_to_dsi(struct drm_encoder *e)
static inline struct exynos_dsi *bridge_to_dsi(struct drm_bridge *b)
{
return container_of(e, struct exynos_dsi, encoder);
return container_of(b, struct exynos_dsi, bridge);
}
enum reg_idx {
@ -1365,10 +1360,10 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
}
}
static void exynos_dsi_enable(struct drm_encoder *encoder)
static void exynos_dsi_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_bridge *iter;
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
int ret;
if (dsi->state & DSIM_STATE_ENABLED)
@ -1381,151 +1376,70 @@ static void exynos_dsi_enable(struct drm_encoder *encoder)
}
dsi->state |= DSIM_STATE_ENABLED;
}
if (dsi->panel) {
ret = drm_panel_prepare(dsi->panel);
if (ret < 0)
goto err_put_sync;
} else {
list_for_each_entry_reverse(iter, &dsi->bridge_chain,
chain_node) {
if (iter->funcs->pre_enable)
iter->funcs->pre_enable(iter);
}
}
static void exynos_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
if (dsi->panel) {
ret = drm_panel_enable(dsi->panel);
if (ret < 0)
goto err_display_disable;
} else {
list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->enable)
iter->funcs->enable(iter);
}
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
return;
err_display_disable:
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
err_put_sync:
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put(dsi->dev);
}
static void exynos_dsi_disable(struct drm_encoder *encoder)
static void exynos_dsi_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_bridge *iter;
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
drm_panel_disable(dsi->panel);
list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->disable)
iter->funcs->disable(iter);
}
static void exynos_dsi_atomic_post_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
exynos_dsi_set_display_enable(dsi, false);
drm_panel_unprepare(dsi->panel);
list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
if (iter->funcs->post_disable)
iter->funcs->post_disable(iter);
}
dsi->state &= ~DSIM_STATE_ENABLED;
pm_runtime_put_sync(dsi->dev);
}
static void exynos_dsi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
static void exynos_dsi_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
drm_mode_copy(&dsi->mode, adjusted_mode);
}
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
static int exynos_dsi_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
return connector->status;
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
return drm_bridge_attach(bridge->encoder, dsi->out_bridge, NULL, flags);
}
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
connector->dev = NULL;
}
static const struct drm_connector_funcs exynos_dsi_connector_funcs = {
.detect = exynos_dsi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = exynos_dsi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int exynos_dsi_get_modes(struct drm_connector *connector)
{
struct exynos_dsi *dsi = connector_to_dsi(connector);
if (dsi->panel)
return drm_panel_get_modes(dsi->panel, connector);
return 0;
}
static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
.get_modes = exynos_dsi_get_modes,
};
static int exynos_dsi_create_connector(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
struct drm_device *drm = encoder->dev;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
DRM_DEV_ERROR(dsi->dev,
"Failed to initialize connector with drm\n");
return ret;
}
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
if (!drm->registered)
return 0;
connector->funcs->reset(connector);
drm_connector_register(connector);
return 0;
}
static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.enable = exynos_dsi_enable,
.disable = exynos_dsi_disable,
.mode_set = exynos_dsi_mode_set,
static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_pre_enable = exynos_dsi_atomic_pre_enable,
.atomic_enable = exynos_dsi_atomic_enable,
.atomic_disable = exynos_dsi_atomic_disable,
.atomic_post_disable = exynos_dsi_atomic_post_disable,
.mode_set = exynos_dsi_mode_set,
.attach = exynos_dsi_attach,
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
@ -1534,33 +1448,24 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct exynos_dsi *dsi = host_to_dsi(host);
struct device *dev = dsi->dev;
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm = encoder->dev;
struct drm_bridge *out_bridge;
int ret;
out_bridge = of_drm_find_bridge(device->dev.of_node);
if (out_bridge) {
drm_bridge_attach(encoder, out_bridge, NULL, 0);
dsi->out_bridge = out_bridge;
list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
int ret = exynos_dsi_create_connector(encoder);
if (ret) {
DRM_DEV_ERROR(dsi->dev,
"failed to create connector ret = %d\n",
ret);
drm_encoder_cleanup(encoder);
return ret;
}
dsi->panel = of_drm_find_panel(device->dev.of_node);
if (IS_ERR(dsi->panel))
dsi->panel = NULL;
else
dsi->connector.status = connector_status_connected;
dsi->out_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(dsi->out_bridge)) {
ret = PTR_ERR(dsi->out_bridge);
DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret);
return ret;
}
DRM_DEV_INFO(dev, "Attached %s device\n", device->name);
drm_bridge_add(&dsi->bridge);
drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
/*
* This is a temporary solution and should be made by more generic way.
*
@ -1568,7 +1473,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
* TE interrupt handler.
*/
if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
int ret = exynos_dsi_register_te_irq(dsi, &device->dev);
ret = exynos_dsi_register_te_irq(dsi, &device->dev);
if (ret)
return ret;
}
@ -1595,24 +1500,17 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
struct exynos_dsi *dsi = host_to_dsi(host);
struct drm_device *drm = dsi->encoder.dev;
if (dsi->panel) {
mutex_lock(&drm->mode_config.mutex);
exynos_dsi_disable(&dsi->encoder);
dsi->panel = NULL;
dsi->connector.status = connector_status_disconnected;
mutex_unlock(&drm->mode_config.mutex);
} else {
if (dsi->out_bridge->funcs->detach)
dsi->out_bridge->funcs->detach(dsi->out_bridge);
dsi->out_bridge = NULL;
INIT_LIST_HEAD(&dsi->bridge_chain);
}
if (dsi->out_bridge->funcs->detach)
dsi->out_bridge->funcs->detach(dsi->out_bridge);
dsi->out_bridge = NULL;
if (drm->mode_config.poll_enabled)
drm_kms_helper_hotplug_event(drm);
exynos_dsi_unregister_te_irq(dsi);
drm_bridge_remove(&dsi->bridge);
return 0;
}
@ -1662,11 +1560,6 @@ static int exynos_dsi_of_read_u32(const struct device_node *np,
return ret;
}
enum {
DSI_PORT_IN,
DSI_PORT_OUT
};
static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
{
struct device *dev = dsi->dev;
@ -1697,26 +1590,14 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct exynos_dsi *dsi = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dsi->encoder;
struct drm_device *drm_dev = data;
struct device_node *in_bridge_node;
struct drm_bridge *in_bridge;
int ret;
drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
if (ret < 0)
return ret;
in_bridge_node = of_graph_get_remote_node(dev->of_node, DSI_PORT_IN, 0);
if (in_bridge_node) {
in_bridge = of_drm_find_bridge(in_bridge_node);
if (in_bridge)
drm_bridge_attach(encoder, in_bridge, NULL, 0);
of_node_put(in_bridge_node);
}
return mipi_dsi_host_register(&dsi->dsi_host);
}
@ -1724,9 +1605,8 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct exynos_dsi *dsi = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dsi->encoder;
exynos_dsi_disable(encoder);
exynos_dsi_atomic_disable(&dsi->bridge, NULL);
mipi_dsi_host_unregister(&dsi->dsi_host);
}
@ -1749,7 +1629,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
INIT_LIST_HEAD(&dsi->bridge_chain);
dsi->dsi_host.ops = &exynos_dsi_ops;
dsi->dsi_host.dev = dev;
@ -1817,6 +1696,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
dsi->bridge.funcs = &exynos_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
ret = component_add(dev, &exynos_dsi_component_ops);
if (ret)
goto err_disable_runtime;
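
The core of the conversion above is the devm_drm_of_get_bridge() lookup plus registering the DSI host as a bridge of its own. Stripped of the exynos specifics, the pattern looks roughly like this (foo_* names are placeholders):

  static int foo_dsi_host_attach(struct foo_dsi *dsi, struct device *dev)
  {
          /* Port 1, endpoint 0: wherever the panel or next bridge sits
           * in this binding. */
          dsi->out_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
          if (IS_ERR(dsi->out_bridge))
                  return PTR_ERR(dsi->out_bridge);

          dsi->bridge.funcs = &foo_dsi_bridge_funcs;
          dsi->bridge.of_node = dev->of_node;
          dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
          drm_bridge_add(&dsi->bridge);

          return 0;
  }

devm_drm_of_get_bridge() also wraps a bare panel in a panel_bridge, which is why the driver's own drm_panel and connector handling can be dropped.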


@ -102,6 +102,7 @@ struct exynos_mic {
struct videomode vm;
struct drm_encoder *encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
bool enabled;
};
@ -298,12 +299,22 @@ static void mic_pre_enable(struct drm_bridge *bridge)
static void mic_enable(struct drm_bridge *bridge) { }
static int mic_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct exynos_mic *mic = bridge->driver_private;
return drm_bridge_attach(bridge->encoder, mic->next_bridge,
&mic->bridge, flags);
}
static const struct drm_bridge_funcs mic_bridge_funcs = {
.disable = mic_disable,
.post_disable = mic_post_disable,
.mode_set = mic_mode_set,
.pre_enable = mic_pre_enable,
.enable = mic_enable,
.attach = mic_attach,
};
static int exynos_mic_bind(struct device *dev, struct device *master,
@ -377,6 +388,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_mic *mic;
struct device_node *remote;
struct resource res;
int ret, i;
@ -420,6 +432,16 @@ static int exynos_mic_probe(struct platform_device *pdev)
}
}
remote = of_graph_get_remote_node(dev->of_node, 1, 0);
mic->next_bridge = of_drm_find_bridge(remote);
if (IS_ERR(mic->next_bridge)) {
DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n");
ret = PTR_ERR(mic->next_bridge);
goto err;
}
of_node_put(remote);
platform_set_drvdata(pdev, mic);
mic->bridge.funcs = &mic_bridge_funcs;


@ -262,6 +262,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
dev_dbg(dev->dev, "Saving GPU registers.\n");
@ -298,8 +299,10 @@ static int cdv_save_display_registers(struct drm_device *dev)
regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
drm_connector_list_iter_end(&conn_iter);
return 0;
}
@ -317,6 +320,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
u32 temp;
@ -373,8 +377,10 @@ static int cdv_restore_display_registers(struct drm_device *dev)
drm_mode_config_reset(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
drm_connector_list_iter_end(&conn_iter);
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
@ -603,7 +609,6 @@ const struct psb_ops cdv_chip_ops = {
.errata = cdv_errata,
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &gma_intel_crtc_funcs,
.clock_funcs = &cdv_clock_funcs,
.output_init = cdv_output_init,
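
This and several of the gma500 files below replace open-coded walks of mode_config.connector_list with the drm_connector_list_iter API. The one thing the pattern insists on is calling drm_connector_list_iter_end() on every exit path, including early breaks; a hedged sketch:

  static bool foo_has_lvds(struct drm_device *dev)
  {
          struct drm_connector_list_iter conn_iter;
          struct drm_connector *connector;
          bool found = false;

          drm_connector_list_iter_begin(dev, &conn_iter);
          drm_for_each_connector_iter(connector, &conn_iter) {
                  if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
                          found = true;
                          break;
                  }
          }
          /* Required even when the loop is left early. */
          drm_connector_list_iter_end(&conn_iter);

          return found;
  }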


@ -191,12 +191,12 @@ static enum drm_connector_status cdv_intel_crt_detect(
static void cdv_intel_crt_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(gma_connector);
}
static int cdv_intel_crt_get_modes(struct drm_connector *connector)
@ -281,8 +281,6 @@ void cdv_intel_crt_init(struct drm_device *dev,
drm_connector_helper_add(connector,
&cdv_intel_crt_connector_helper_funcs);
drm_connector_register(connector);
return;
failed_ddc:
drm_encoder_cleanup(&gma_encoder->base);


@ -584,13 +584,14 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
bool ok;
bool is_lvds = false;
bool is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
const struct gma_limit_t *limit;
u32 ddi_select = 0;
bool is_edp = false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct gma_encoder *gma_encoder =
gma_attached_encoder(connector);
@ -613,10 +614,14 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
is_edp = true;
break;
default:
drm_connector_list_iter_end(&conn_iter);
DRM_ERROR("invalid output type.\n");
return 0;
}
break;
}
drm_connector_list_iter_end(&conn_iter);
if (dev_priv->dplla_96mhz)
/* low-end sku, 96/100 mhz */


@ -1857,6 +1857,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
static void
cdv_intel_dp_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
@ -1866,9 +1867,8 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
intel_dp->panel_fixed_mode = NULL;
}
i2c_del_adapter(&intel_dp->adapter);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(gma_connector);
}
static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
@ -1990,8 +1990,6 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_connector_register(connector);
/* Set up the DDC bus. */
switch (output_reg) {
case DP_B:


@ -242,12 +242,12 @@ static enum drm_mode_status cdv_hdmi_mode_valid(struct drm_connector *connector,
static void cdv_hdmi_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(gma_connector);
}
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
@ -352,7 +352,6 @@ void cdv_hdmi_init(struct drm_device *dev,
hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
drm_connector_register(connector);
return;
failed_ddc:


@ -326,12 +326,12 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
*/
static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(gma_connector);
}
static int cdv_intel_lvds_set_property(struct drm_connector *connector,
@ -647,7 +647,6 @@ void cdv_intel_lvds_init(struct drm_device *dev,
out:
mutex_unlock(&dev->mode_config.mutex);
drm_connector_register(connector);
return;
failed_find:


@ -451,6 +451,7 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
static void psb_setup_outputs(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
drm_mode_create_scaling_mode_property(dev);
@ -461,8 +462,8 @@ static void psb_setup_outputs(struct drm_device *dev)
"backlight", 0, 100);
dev_priv->ops->output_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct drm_encoder *encoder = &gma_encoder->base;
int crtc_mask = 0, clone_mask = 0;
@ -505,6 +506,7 @@ static void psb_setup_outputs(struct drm_device *dev)
encoder->possible_clones =
gma_connector_clones(dev, clone_mask);
}
drm_connector_list_iter_end(&conn_iter);
}
void psb_modeset_init(struct drm_device *dev)
@ -514,7 +516,8 @@ void psb_modeset_init(struct drm_device *dev)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
drm_mode_config_init(dev);
if (drmm_mode_config_init(dev))
return;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@ -546,6 +549,5 @@ void psb_modeset_cleanup(struct drm_device *dev)
if (dev_priv->modeset) {
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
}
}
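
The switch to drmm_mode_config_init() above makes mode-config teardown managed by the drm_device, which is why the explicit drm_mode_config_cleanup() call in psb_modeset_cleanup() can go away. A minimal hedged sketch of the managed pattern; foo_mode_config_funcs is a placeholder:

  static int foo_modeset_init(struct drm_device *dev)
  {
          int ret;

          /* Managed: released automatically when the drm_device goes away,
           * so no matching drm_mode_config_cleanup() is needed. */
          ret = drmm_mode_config_init(dev);
          if (ret)
                  return ret;

          dev->mode_config.min_width = 0;
          dev->mode_config.min_height = 0;
          dev->mode_config.funcs = &foo_mode_config_funcs;

          return 0;
  }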


@ -21,6 +21,10 @@
#include "gem.h"
#include "psb_drv.h"
/*
* PSB GEM object
*/
int psb_gem_pin(struct psb_gem_object *pobj)
{
struct drm_gem_object *obj = &pobj->base;
@ -31,7 +35,9 @@ int psb_gem_pin(struct psb_gem_object *pobj)
unsigned int npages;
int ret;
mutex_lock(&dev_priv->gtt_mutex);
ret = dma_resv_lock(obj->resv, NULL);
if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
return ret;
if (pobj->in_gart || pobj->stolen)
goto out; /* already mapped */
@ -39,7 +45,7 @@ int psb_gem_pin(struct psb_gem_object *pobj)
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto err_mutex_unlock;
goto err_dma_resv_unlock;
}
npages = obj->size / PAGE_SIZE;
@ -51,17 +57,16 @@ int psb_gem_pin(struct psb_gem_object *pobj)
(gpu_base + pobj->offset), npages, 0, 0,
PSB_MMU_CACHED_MEMORY);
pobj->npage = npages;
pobj->pages = pages;
out:
++pobj->in_gart;
mutex_unlock(&dev_priv->gtt_mutex);
dma_resv_unlock(obj->resv);
return 0;
err_mutex_unlock:
mutex_unlock(&dev_priv->gtt_mutex);
err_dma_resv_unlock:
dma_resv_unlock(obj->resv);
return ret;
}
@ -71,8 +76,12 @@ void psb_gem_unpin(struct psb_gem_object *pobj)
struct drm_device *dev = obj->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
unsigned long npages;
int ret;
mutex_lock(&dev_priv->gtt_mutex);
ret = dma_resv_lock(obj->resv, NULL);
if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
return;
WARN_ON(!pobj->in_gart);
@ -81,19 +90,20 @@ void psb_gem_unpin(struct psb_gem_object *pobj)
if (pobj->in_gart || pobj->stolen)
goto out;
npages = obj->size / PAGE_SIZE;
psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
(gpu_base + pobj->offset), pobj->npage, 0, 0);
(gpu_base + pobj->offset), npages, 0, 0);
psb_gtt_remove_pages(dev_priv, &pobj->resource);
/* Reset caching flags */
set_pages_array_wb(pobj->pages, pobj->npage);
set_pages_array_wb(pobj->pages, npages);
drm_gem_put_pages(obj, pobj->pages, true, false);
pobj->pages = NULL;
pobj->npage = 0;
out:
mutex_unlock(&dev_priv->gtt_mutex);
dma_resv_unlock(obj->resv);
}
static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
@ -290,3 +300,132 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
return ret;
}
/*
* Memory management
*/
/* Insert vram stolen pages into the GTT. */
static void psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
struct drm_device *dev = &pdev->dev;
unsigned int pfn_base;
unsigned int i, num_pages;
uint32_t pte;
pfn_base = pdev->stolen_base >> PAGE_SHIFT;
num_pages = pdev->vram_stolen_size >> PAGE_SHIFT;
drm_dbg(dev, "Set up %u stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
iowrite32(pte, pdev->gtt_map + i);
}
(void)ioread32(pdev->gtt_map + i - 1);
}
int psb_gem_mm_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long stolen_size, vram_stolen_size;
struct psb_gtt *pg;
int ret;
mutex_init(&dev_priv->mmap_mutex);
pg = &dev_priv->gtt;
pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;
stolen_size = vram_stolen_size;
dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
dev_priv->stolen_base, vram_stolen_size / 1024);
pg->stolen_size = stolen_size;
dev_priv->vram_stolen_size = vram_stolen_size;
dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
if (!dev_priv->vram_addr) {
dev_err(dev->dev, "Failure to map stolen base.\n");
ret = -ENOMEM;
goto err_mutex_destroy;
}
psb_gem_mm_populate_stolen(dev_priv);
return 0;
err_mutex_destroy:
mutex_destroy(&dev_priv->mmap_mutex);
return ret;
}
void psb_gem_mm_fini(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iounmap(dev_priv->vram_addr);
mutex_destroy(&dev_priv->mmap_mutex);
}
/* Re-insert all pinned GEM objects into GTT. */
static void psb_gem_mm_populate_resources(struct drm_psb_private *pdev)
{
unsigned int restored = 0, total = 0, size = 0;
struct resource *r = pdev->gtt_mem->child;
struct drm_device *dev = &pdev->dev;
struct psb_gem_object *pobj;
while (r) {
/*
* TODO: GTT restoration needs a refactoring, so that we don't have to touch
* struct psb_gem_object here. The type represents a GEM object and is
* not related to the GTT itself.
*/
pobj = container_of(r, struct psb_gem_object, resource);
if (pobj->pages) {
psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
size += resource_size(&pobj->resource);
++restored;
}
r = r->sibling;
++total;
}
drm_dbg(dev, "Restored %u of %u gtt ranges (%u KB)", restored, total, (size / 1024));
}
int psb_gem_mm_resume(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long stolen_size, vram_stolen_size;
struct psb_gtt *pg;
pg = &dev_priv->gtt;
pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;
stolen_size = vram_stolen_size;
dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n", dev_priv->stolen_base,
vram_stolen_size / 1024);
if (stolen_size != pg->stolen_size) {
dev_err(dev->dev, "GTT resume error.\n");
return -EINVAL;
}
psb_gem_mm_populate_stolen(dev_priv);
psb_gem_mm_populate_resources(dev_priv);
return 0;
}


@ -14,6 +14,10 @@
struct drm_device;
/*
* PSB GEM object
*/
struct psb_gem_object {
struct drm_gem_object base;
@ -23,7 +27,6 @@ struct psb_gem_object {
bool stolen; /* Backed from stolen RAM */
bool mmapping; /* Is mmappable */
struct page **pages; /* Backing pages if present */
int npage; /* Number of backing pages */
};
static inline struct psb_gem_object *to_psb_gem_object(struct drm_gem_object *obj)
@ -37,4 +40,12 @@ psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen,
int psb_gem_pin(struct psb_gem_object *pobj);
void psb_gem_unpin(struct psb_gem_object *pobj);
/*
* Memory management
*/
int psb_gem_mm_init(struct drm_device *dev);
void psb_gem_mm_fini(struct drm_device *dev);
int psb_gem_mm_resume(struct drm_device *dev);
#endif


@ -17,7 +17,7 @@
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "psb_drv.h"
#include "psb_irq.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
@ -27,17 +27,21 @@
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *l_entry;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
list_for_each_entry(l_entry, &mode_config->connector_list, head) {
if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder && connector->encoder->crtc == crtc) {
struct gma_encoder *gma_encoder =
gma_attached_encoder(l_entry);
if (gma_encoder->type == type)
gma_attached_encoder(connector);
if (gma_encoder->type == type) {
drm_connector_list_iter_end(&conn_iter);
return true;
}
}
}
drm_connector_list_iter_end(&conn_iter);
return false;
}
@ -172,9 +176,9 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
}
}
int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
u32 size,
struct drm_modeset_acquire_ctx *ctx)
static int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, u32 size,
struct drm_modeset_acquire_ctx *ctx)
{
gma_crtc_load_lut(crtc);
@ -319,10 +323,9 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(DSPARB, 0x3F3E);
}
int gma_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width, uint32_t height)
static int gma_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv, uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@ -391,11 +394,9 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
goto unref_cursor;
}
/* Prevent overflow */
if (pobj->npage > 4)
cursor_pages = 4;
else
cursor_pages = pobj->npage;
cursor_pages = obj->size / PAGE_SIZE;
if (cursor_pages > 4)
cursor_pages = 4; /* Prevent overflow */
/* Copy the cursor to cursor mem */
tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
@ -437,7 +438,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
return ret;
}
int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
static int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct drm_device *dev = crtc->dev;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
@ -567,6 +568,18 @@ int gma_crtc_set_config(struct drm_mode_set *set,
return ret;
}
const struct drm_crtc_funcs gma_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
.enable_vblank = gma_crtc_enable_vblank,
.disable_vblank = gma_crtc_disable_vblank,
.get_vblank_counter = gma_crtc_get_vblank_counter,
};
/*
* Save HW states of given crtc
*/


@ -58,15 +58,7 @@ extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
extern void gma_wait_for_vblank(struct drm_device *dev);
extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width, uint32_t height);
extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
extern void gma_crtc_load_lut(struct drm_crtc *crtc);
extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, u32 size,
struct drm_modeset_acquire_ctx *ctx);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
@ -83,6 +75,8 @@ extern int gma_crtc_set_config(struct drm_mode_set *set,
extern void gma_crtc_save(struct drm_crtc *crtc);
extern void gma_crtc_restore(struct drm_crtc *crtc);
extern const struct drm_crtc_funcs gma_crtc_funcs;
extern void gma_encoder_prepare(struct drm_encoder *encoder);
extern void gma_encoder_commit(struct drm_encoder *encoder);
extern void gma_encoder_destroy(struct drm_encoder *encoder);


@ -49,7 +49,7 @@ int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res
*
* Set the GTT entry for the appropriate memory type.
*/
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
uint32_t mask = PSB_PTE_VALID;
@ -74,11 +74,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct res
return pdev->gtt_map + (offset >> PAGE_SHIFT);
}
/*
* Take our preallocated GTT range and insert the GEM object into
* the GTT. This is protected via the gtt mutex which the caller
* must hold.
*/
/* Acquires GTT mutex internally. */
void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
struct page **pages)
{
@ -86,6 +82,8 @@ void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *r
u32 __iomem *gtt_slot;
u32 pte;
mutex_lock(&pdev->gtt_mutex);
/* Write our page entries into the GTT itself */
npages = resource_size(res) >> PAGE_SHIFT;
@ -98,19 +96,19 @@ void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *r
/* Make sure all the entries are set before we return */
ioread32(gtt_slot - 1);
mutex_unlock(&pdev->gtt_mutex);
}
/*
* Remove a preallocated GTT range from the GTT. Overwrite all the
* page table entries with the dummy page. This is protected via the gtt
* mutex which the caller must hold.
*/
/* Acquires GTT mutex internally. */
void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res)
{
resource_size_t npages, i;
u32 __iomem *gtt_slot;
u32 pte;
mutex_lock(&pdev->gtt_mutex);
/* Install scratch page for the resource */
pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY);
@ -123,211 +121,192 @@ void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *r
/* Make sure all the entries are set before we return */
ioread32(gtt_slot - 1);
mutex_unlock(&pdev->gtt_mutex);
}
static void psb_gtt_alloc(struct drm_device *dev)
static int psb_gtt_enable(struct drm_psb_private *dev_priv)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
init_rwsem(&dev_priv->gtt.sem);
}
void psb_gtt_takedown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_device *dev = &dev_priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
if (dev_priv->gtt_map) {
iounmap(dev_priv->gtt_map);
dev_priv->gtt_map = NULL;
}
if (dev_priv->gtt_initialized) {
pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl);
PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
(void) PSB_RVDC32(PSB_PGETBL_CTL);
}
if (dev_priv->vram_addr)
iounmap(dev_priv->gtt_map);
}
int psb_gtt_init(struct drm_device *dev, int resume)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
unsigned pfn_base;
struct psb_gtt *pg;
int ret = 0;
uint32_t pte;
if (!resume) {
mutex_init(&dev_priv->gtt_mutex);
mutex_init(&dev_priv->mmap_mutex);
psb_gtt_alloc(dev);
}
pg = &dev_priv->gtt;
/* Enable the GTT */
pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
ret = pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
if (ret)
return pcibios_err_to_errno(ret);
ret = pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
if (ret)
return pcibios_err_to_errno(ret);
dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
(void) PSB_RVDC32(PSB_PGETBL_CTL);
(void)PSB_RVDC32(PSB_PGETBL_CTL);
return 0;
}
static void psb_gtt_disable(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = &dev_priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl);
PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
(void)PSB_RVDC32(PSB_PGETBL_CTL);
}
void psb_gtt_fini(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iounmap(dev_priv->gtt_map);
psb_gtt_disable(dev_priv);
mutex_destroy(&dev_priv->gtt_mutex);
}
/* Clear GTT. Use a scratch page to avoid accidents or scribbles. */
static void psb_gtt_clear(struct drm_psb_private *pdev)
{
resource_size_t pfn_base;
unsigned long i;
uint32_t pte;
pfn_base = page_to_pfn(pdev->scratch_page);
pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
for (i = 0; i < pdev->gtt.gtt_pages; ++i)
iowrite32(pte, pdev->gtt_map + i);
(void)ioread32(pdev->gtt_map + i - 1);
}
static void psb_gtt_init_ranges(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = &dev_priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_gtt *pg = &dev_priv->gtt;
resource_size_t gtt_phys_start, mmu_gatt_start, gtt_start, gtt_pages,
gatt_start, gatt_pages;
struct resource *gtt_mem;
/* The root resource we allocate address space from */
dev_priv->gtt_initialized = 1;
pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
/*
* The video mmu has a hw bug when accessing 0x0D0000000.
* Make gatt start at 0x0e000,0000. This doesn't actually
* matter for us but may do if the video acceleration ever
* gets opened up.
* The video MMU has a HW bug when accessing 0x0d0000000. Make
* GATT start at 0x0e0000000. This doesn't actually matter for
* us now, but maybe will if the video acceleration ever gets
* opened up.
*/
pg->mmu_gatt_start = 0xE0000000;
mmu_gatt_start = 0xe0000000;
gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE);
gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
/* CDV doesn't report this. In which case the system has 64 gtt pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
if (!gtt_start || !gtt_pages) {
dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
gtt_pages = 64;
pg->gtt_start = dev_priv->pge_ctl;
gtt_start = dev_priv->pge_ctl;
}
pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE)
>> PAGE_SHIFT;
dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE);
gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE) >> PAGE_SHIFT;
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
if (!gatt_pages || !gatt_start) {
static struct resource fudge; /* Preferably peppermint */
/* This can occur on CDV systems. Fudge it in this case.
We really don't care what imaginary space is being allocated
at this point */
/*
* This can occur on CDV systems. Fudge it in this case. We
* really don't care what imaginary space is being allocated
* at this point.
*/
dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
pg->gatt_start = 0x40000000;
pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
/* This is a little confusing but in fact the GTT is providing
a view from the GPU into memory and not vice versa. As such
this is really allocating space that is not the same as the
CPU address space on CDV */
gatt_start = 0x40000000;
gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
/*
* This is a little confusing but in fact the GTT is providing
* a view from the GPU into memory and not vice versa. As such
* this is really allocating space that is not the same as the
* CPU address space on CDV.
*/
fudge.start = 0x40000000;
fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
fudge.name = "fudge";
fudge.flags = IORESOURCE_MEM;
dev_priv->gtt_mem = &fudge;
}
pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
- PAGE_SIZE;
stolen_size = vram_stolen_size;
dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
dev_priv->stolen_base, vram_stolen_size / 1024);
if (resume && (gtt_pages != pg->gtt_pages) &&
(stolen_size != pg->stolen_size)) {
dev_err(dev->dev, "GTT resume error.\n");
ret = -EINVAL;
goto out_err;
gtt_mem = &fudge;
} else {
gtt_mem = &pdev->resource[PSB_GATT_RESOURCE];
}
pg->gtt_phys_start = gtt_phys_start;
pg->mmu_gatt_start = mmu_gatt_start;
pg->gtt_start = gtt_start;
pg->gtt_pages = gtt_pages;
pg->stolen_size = stolen_size;
dev_priv->vram_stolen_size = vram_stolen_size;
pg->gatt_start = gatt_start;
pg->gatt_pages = gatt_pages;
dev_priv->gtt_mem = gtt_mem;
}
/*
* Map the GTT and the stolen memory area
*/
if (!resume)
dev_priv->gtt_map = ioremap(pg->gtt_phys_start,
gtt_pages << PAGE_SHIFT);
int psb_gtt_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_gtt *pg = &dev_priv->gtt;
int ret;
mutex_init(&dev_priv->gtt_mutex);
ret = psb_gtt_enable(dev_priv);
if (ret)
goto err_mutex_destroy;
psb_gtt_init_ranges(dev_priv);
dev_priv->gtt_map = ioremap(pg->gtt_phys_start, pg->gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
dev_err(dev->dev, "Failure to map gtt.\n");
ret = -ENOMEM;
goto out_err;
goto err_psb_gtt_disable;
}
if (!resume)
dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
stolen_size);
psb_gtt_clear(dev_priv);
if (!dev_priv->vram_addr) {
dev_err(dev->dev, "Failure to map stolen base.\n");
ret = -ENOMEM;
goto out_err;
}
/*
* Insert vram stolen pages into the GTT
*/
pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
num_pages = vram_stolen_size >> PAGE_SHIFT;
dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
iowrite32(pte, dev_priv->gtt_map + i);
}
/*
* Init rest of GTT to the scratch page to avoid accidents or scribbles
*/
pfn_base = page_to_pfn(dev_priv->scratch_page);
pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
for (; i < gtt_pages; ++i)
iowrite32(pte, dev_priv->gtt_map + i);
(void) ioread32(dev_priv->gtt_map + i - 1);
return 0;
out_err:
psb_gtt_takedown(dev);
err_psb_gtt_disable:
psb_gtt_disable(dev_priv);
err_mutex_destroy:
mutex_destroy(&dev_priv->gtt_mutex);
return ret;
}
int psb_gtt_restore(struct drm_device *dev)
int psb_gtt_resume(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct resource *r = dev_priv->gtt_mem->child;
struct psb_gem_object *pobj;
unsigned int restored = 0, total = 0, size = 0;
struct psb_gtt *pg = &dev_priv->gtt;
unsigned int old_gtt_pages = pg->gtt_pages;
int ret;
/* On resume, the gtt_mutex is already initialized */
mutex_lock(&dev_priv->gtt_mutex);
psb_gtt_init(dev, 1);
/* Enable the GTT */
ret = psb_gtt_enable(dev_priv);
if (ret)
return ret;
while (r != NULL) {
/*
* TODO: GTT restoration needs a refactoring, so that we don't have to touch
* struct psb_gem_object here. The type represents a GEM object and is
* not related to the GTT itself.
*/
pobj = container_of(r, struct psb_gem_object, resource);
if (pobj->pages) {
psb_gtt_insert_pages(dev_priv, &pobj->resource, pobj->pages);
size += pobj->resource.end - pobj->resource.start;
restored++;
}
r = r->sibling;
total++;
psb_gtt_init_ranges(dev_priv);
if (old_gtt_pages != pg->gtt_pages) {
dev_err(dev->dev, "GTT resume error.\n");
ret = -ENODEV;
goto err_psb_gtt_disable;
}
mutex_unlock(&dev_priv->gtt_mutex);
DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
total, (size / 1024));
return 0;
psb_gtt_clear(dev_priv);
err_psb_gtt_disable:
psb_gtt_disable(dev_priv);
return ret;
}


@ -22,18 +22,18 @@ struct psb_gtt {
unsigned gatt_pages;
unsigned long stolen_size;
unsigned long vram_stolen_size;
struct rw_semaphore sem;
};
/* Exported functions */
extern int psb_gtt_init(struct drm_device *dev, int resume);
extern void psb_gtt_takedown(struct drm_device *dev);
extern int psb_gtt_restore(struct drm_device *dev);
int psb_gtt_init(struct drm_device *dev);
void psb_gtt_fini(struct drm_device *dev);
int psb_gtt_resume(struct drm_device *dev);
int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res,
const char *name, resource_size_t size, resource_size_t align,
bool stolen, u32 *offset);
uint32_t psb_gtt_mask_pte(uint32_t pfn, int type);
void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res,
struct page **pages);
void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res);


@ -372,9 +372,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
bool ok, is_sdvo = false;
bool is_lvds = false;
bool is_mipi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
int i;
int need_aux = gma_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ? 1 : 0;
@ -385,14 +385,11 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
memcpy(&gma_crtc->saved_mode,
mode,
sizeof(struct drm_display_mode));
memcpy(&gma_crtc->saved_adjusted_mode,
adjusted_mode,
sizeof(struct drm_display_mode));
drm_mode_copy(&gma_crtc->saved_mode, mode);
drm_mode_copy(&gma_crtc->saved_adjusted_mode, adjusted_mode);
list_for_each_entry(connector, &mode_config->connector_list, head) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
@ -409,8 +406,16 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
is_mipi = true;
break;
}
break;
}
if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
drm_connector_list_iter_end(&conn_iter);
/* Disable the VGA plane that we never use */
for (i = 0; i <= need_aux; i++)
REG_WRITE_WITH_AUX(VGACNTRL, VGA_DISP_DISABLE, i);
@ -424,10 +429,6 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
(mode->crtc_vdisplay - 1), i);
}
if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
/* Moorestown doesn't have register support for centering so
* we need to mess with the h/vblank and h/vsync start and

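Several of the gma500 hunks above replace open-coded walks of dev->mode_config.connector_list with the connector-list iterator API. A rough, self-contained sketch of that pattern follows; the function name and the specific connector test are illustrative, not taken from the patch:

#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>

/* Sketch: find the connector currently driven by @crtc using the
 * drm_connector_list_iter API instead of touching connector_list directly.
 * A real driver may also need to hold a reference on the result. */
static struct drm_connector *find_connector_for_crtc(struct drm_device *dev,
						      struct drm_crtc *crtc)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector, *found = NULL;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->encoder && connector->encoder->crtc == crtc) {
			found = connector;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return found;
}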

@ -545,7 +545,6 @@ const struct psb_ops oaktrail_chip_ops = {
.chip_setup = oaktrail_chip_setup,
.chip_teardown = oaktrail_teardown,
.crtc_helper = &oaktrail_helper_funcs,
.crtc_funcs = &gma_intel_crtc_funcs,
.output_init = oaktrail_output_init,


@ -654,7 +654,6 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_connector_register(connector);
dev_info(dev->dev, "HDMI initialised.\n");
return;


@ -85,7 +85,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector = NULL;
struct drm_crtc *crtc = encoder->crtc;
u32 lvds_port;
@ -112,21 +112,22 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
REG_WRITE(LVDS, lvds_port);
/* Find the connector we're trying to set up */
list_for_each_entry(connector, &mode_config->connector_list, head) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder && connector->encoder->crtc == crtc)
break;
}
if (list_entry_is_head(connector, &mode_config->connector_list, head)) {
if (!connector) {
drm_connector_list_iter_end(&conn_iter);
DRM_ERROR("Couldn't find connector when setting mode");
gma_power_end(dev);
return;
}
drm_object_property_get_value(
&connector->base,
dev->mode_config.scaling_mode_property,
&v);
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &v);
drm_connector_list_iter_end(&conn_iter);
if (v == DRM_MODE_SCALE_NO_SCALE)
REG_WRITE(PFIT_CONTROL, 0);
@ -400,7 +401,6 @@ void oaktrail_lvds_init(struct drm_device *dev,
out:
mutex_unlock(&dev->mode_config.mutex);
drm_connector_register(connector);
return;
failed_find:


@ -23,6 +23,7 @@
*/
#include <linux/acpi.h>
#include "psb_drv.h"
#include "psb_irq.h"
#include "psb_intel_reg.h"
#define PCI_ASLE 0xe4
@ -217,8 +218,8 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
if (asle && system_opregion ) {
/* Don't do this on Medfield or other non PC like devices, they
use the bit for something different altogether */
psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
psb_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
gma_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
gma_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN
| ASLE_PFMB_EN;


@ -28,6 +28,7 @@
* Alan Cox <alan@linux.intel.com>
*/
#include "gem.h"
#include "power.h"
#include "psb_drv.h"
#include "psb_reg.h"
@ -112,7 +113,9 @@ static void gma_resume_display(struct pci_dev *pdev)
pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
psb_gtt_restore(dev); /* Rebuild our GTT mappings */
/* Rebuild our GTT mappings */
psb_gtt_resume(dev);
psb_gem_mm_resume(dev);
dev_priv->ops->restore_regs(dev);
}
@ -198,7 +201,7 @@ int gma_power_suspend(struct device *_dev)
dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
return -EBUSY;
}
psb_irq_uninstall(dev);
gma_irq_uninstall(dev);
gma_suspend_display(dev);
gma_suspend_pci(pdev);
}
@ -220,8 +223,8 @@ int gma_power_resume(struct device *_dev)
mutex_lock(&power_mutex);
gma_resume_pci(pdev);
gma_resume_display(pdev);
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
gma_irq_preinstall(dev);
gma_irq_postinstall(dev);
mutex_unlock(&power_mutex);
return 0;
}
@ -267,8 +270,8 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
/* Ok power up needed */
ret = gma_resume_pci(pdev);
if (ret == 0) {
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
gma_irq_preinstall(dev);
gma_irq_postinstall(dev);
pm_runtime_get(dev->dev);
dev_priv->display_count++;
spin_unlock_irqrestore(&power_ctrl_lock, flags);


@ -168,8 +168,10 @@ static void psb_init_pm(struct drm_device *dev)
static int psb_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_connector *gma_connector;
struct drm_crtc *crtc;
struct gma_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration control + watermarks */
@ -189,9 +191,13 @@ static int psb_save_display_registers(struct drm_device *dev)
dev_priv->ops->save_crtc(crtc);
}
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
if (connector->save)
connector->save(&connector->base);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
gma_connector = to_gma_connector(connector);
if (gma_connector->save)
gma_connector->save(connector);
}
drm_connector_list_iter_end(&conn_iter);
drm_modeset_unlock_all(dev);
return 0;
@ -206,8 +212,10 @@ static int psb_save_display_registers(struct drm_device *dev)
static int psb_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_connector *gma_connector;
struct drm_crtc *crtc;
struct gma_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
/* Display arbitration + watermarks */
@ -228,9 +236,13 @@ static int psb_restore_display_registers(struct drm_device *dev)
if (drm_helper_crtc_in_use(crtc))
dev_priv->ops->restore_crtc(crtc);
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
if (connector->restore)
connector->restore(&connector->base);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
gma_connector = to_gma_connector(connector);
if (gma_connector->restore)
gma_connector->restore(connector);
}
drm_connector_list_iter_end(&conn_iter);
drm_modeset_unlock_all(dev);
return 0;
@ -329,7 +341,6 @@ const struct psb_ops psb_chip_ops = {
.chip_teardown = psb_chip_teardown,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &gma_intel_crtc_funcs,
.clock_funcs = &psb_clock_funcs,
.output_init = psb_output_init,

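The save/restore hunks above go through a driver-specific wrapper around struct drm_connector rather than casting the base pointer. The shape assumed by that code is roughly the following; the field names are inferred from the hunk and may not match the gma500 header exactly:

#include <linux/container_of.h>
#include <drm/drm_connector.h>

/* Illustrative wrapper: a gma500 connector embeds the DRM connector and
 * carries optional save/restore hooks invoked around suspend/resume. */
struct gma_connector {
	struct drm_connector base;
	void (*save)(struct drm_connector *connector);
	void (*restore)(struct drm_connector *connector);
};

#define to_gma_connector(x) container_of(x, struct gma_connector, base)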

@ -28,6 +28,7 @@
#include <drm/drm_vblank.h>
#include "framebuffer.h"
#include "gem.h"
#include "intel_bios.h"
#include "mid_bios.h"
#include "power.h"
@ -99,7 +100,7 @@ static const struct drm_ioctl_desc psb_ioctls[] = {
*
* Soft reset the graphics engine and then reload the necessary registers.
*/
void psb_spank(struct drm_psb_private *dev_priv)
static void psb_spank(struct drm_psb_private *dev_priv)
{
PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
@ -172,6 +173,8 @@ static void psb_driver_unload(struct drm_device *dev)
gma_backlight_exit(dev);
psb_modeset_cleanup(dev);
gma_irq_uninstall(dev);
if (dev_priv->ops->chip_teardown)
dev_priv->ops->chip_teardown(dev);
@ -184,17 +187,16 @@ static void psb_driver_unload(struct drm_device *dev)
if (dev_priv->mmu) {
struct psb_gtt *pg = &dev_priv->gtt;
down_read(&pg->sem);
psb_mmu_remove_pfn_sequence(
psb_mmu_get_default_pd
(dev_priv->mmu),
pg->mmu_gatt_start,
dev_priv->vram_stolen_size >> PAGE_SHIFT);
up_read(&pg->sem);
psb_mmu_driver_takedown(dev_priv->mmu);
dev_priv->mmu = NULL;
}
psb_gtt_takedown(dev);
psb_gem_mm_fini(dev);
psb_gtt_fini(dev);
if (dev_priv->scratch_page) {
set_pages_wb(dev_priv->scratch_page, 1);
__free_page(dev_priv->scratch_page);
@ -234,10 +236,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long resource_start, resource_len;
unsigned long irqflags;
int ret = -ENOMEM;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
struct gma_encoder *gma_encoder;
struct psb_gtt *pg;
int ret = -ENOMEM;
/* initializing driver private data */
@ -326,7 +329,10 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
set_pages_uc(dev_priv->scratch_page, 1);
ret = psb_gtt_init(dev, 0);
ret = psb_gtt_init(dev);
if (ret)
goto out_err;
ret = psb_gem_mm_init(dev);
if (ret)
goto out_err;
@ -345,12 +351,10 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
return ret;
/* Add stolen memory to SGX MMU */
down_read(&pg->sem);
ret = psb_mmu_insert_pfn_sequence(psb_mmu_get_default_pd(dev_priv->mmu),
dev_priv->stolen_base >> PAGE_SHIFT,
pg->gatt_start,
pg->stolen_size >> PAGE_SHIFT, 0);
up_read(&pg->sem);
psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
@ -379,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
psb_irq_install(dev, pdev->irq);
gma_irq_install(dev, pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
@ -387,9 +391,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
psb_fbdev_init(dev);
drm_kms_helper_poll_init(dev);
/* Only add backlight support if we have LVDS output */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
/* Only add backlight support if we have LVDS or MIPI output */
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
gma_encoder = gma_attached_encoder(connector);
switch (gma_encoder->type) {
@ -399,6 +403,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
}
drm_connector_list_iter_end(&conn_iter);
if (ret)
return ret;


@ -13,7 +13,6 @@
#include <drm/drm_device.h>
#include "gma_display.h"
#include "gtt.h"
#include "intel_bios.h"
#include "mmu.h"
@ -36,12 +35,6 @@
/* Append new drm mode definition here, align with libdrm definition */
#define DRM_MODE_SCALE_NO_SCALE 2
enum {
CHIP_PSB_8108 = 0, /* Poulsbo */
CHIP_PSB_8109 = 1, /* Poulsbo */
CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
};
#define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108)
#define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100)
#define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0)
@ -408,7 +401,6 @@ struct drm_psb_private {
uint32_t stolen_base;
u8 __iomem *vram_addr;
unsigned long vram_stolen_size;
int gtt_initialized;
u16 gmch_ctrl; /* Saved GTT setup */
u32 pge_ctl;
@ -586,7 +578,6 @@ struct psb_ops {
/* Sub functions */
struct drm_crtc_helper_funcs const *crtc_helper;
struct drm_crtc_funcs const *crtc_funcs;
const struct gma_clock_funcs *clock_funcs;
/* Setup hooks */
@ -618,36 +609,9 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);
/* psb_irq.c */
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
extern int psb_enable_vblank(struct drm_crtc *crtc);
extern void psb_disable_vblank(struct drm_crtc *crtc);
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
/* framebuffer.c */
extern int psbfb_probed(struct drm_device *dev);
extern int psbfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
/* psb_drv.c */
extern void psb_spank(struct drm_psb_private *dev_priv);
/* psb_reset.c */
/* psb_lid.c */
extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
@ -670,7 +634,6 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
/* psb_intel_display.c */
extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
extern const struct drm_crtc_funcs gma_intel_crtc_funcs;
/* psb_intel_lvds.c */
extern const struct drm_connector_helper_funcs
@ -690,43 +653,7 @@ extern const struct psb_ops oaktrail_chip_ops;
/* cdv_device.c */
extern const struct psb_ops cdv_chip_ops;
/* Debug print bits setting */
#define PSB_D_GENERAL (1 << 0)
#define PSB_D_INIT (1 << 1)
#define PSB_D_IRQ (1 << 2)
#define PSB_D_ENTRY (1 << 3)
/* debug the get H/V BP/FP count */
#define PSB_D_HV (1 << 4)
#define PSB_D_DBI_BF (1 << 5)
#define PSB_D_PM (1 << 6)
#define PSB_D_RENDER (1 << 7)
#define PSB_D_REG (1 << 8)
#define PSB_D_MSVDX (1 << 9)
#define PSB_D_TOPAZ (1 << 10)
extern int drm_idle_check_interval;
/* Utilities */
static inline u32 MRST_MSG_READ32(int domain, uint port, uint offset)
{
int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
u32 value)
{
int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
pci_write_config_dword(pci_root, 0xD4, value);
pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@ -807,24 +734,9 @@ static inline void REGISTER_WRITE8(struct drm_device *dev,
#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
/* #define TRAP_SGX_PM_FAULT 1 */
#ifdef TRAP_SGX_PM_FAULT
#define PSB_RSGX32(_offs) \
({ \
if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
pr_err("access sgx when it's off!! (READ) %s, %d\n", \
__FILE__, __LINE__); \
melay(1000); \
} \
ioread32(dev_priv->sgx_reg + (_offs)); \
})
#else
#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
#endif
#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
#define MSVDX_REG_DUMP 0
#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))


@ -106,7 +106,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
const struct gma_limit_t *limit;
@ -116,7 +116,8 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
return 0;
}
list_for_each_entry(connector, &mode_config->connector_list, head) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
if (!connector->encoder
@ -134,7 +135,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
is_tv = true;
break;
}
break;
}
drm_connector_list_iter_end(&conn_iter);
refclk = 96000;
@ -427,18 +431,6 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.disable = gma_crtc_disable,
};
const struct drm_crtc_funcs gma_intel_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
.enable_vblank = psb_enable_vblank,
.disable_vblank = psb_disable_vblank,
.get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs psb_clock_funcs = {
.clock = psb_intel_clock,
.limit = psb_intel_limit,
@ -500,8 +492,7 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
return;
}
/* Set the CRTC operations from the chip specific data */
drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
drm_crtc_init(dev, &gma_crtc->base, &gma_crtc_funcs);
/* Set the CRTC clock functions from chip specific data */
gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
@ -535,28 +526,32 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc = NULL;
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
if (gma_crtc->pipe == pipe)
break;
return crtc;
}
return crtc;
return NULL;
}
int gma_connector_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
struct drm_connector_list_iter conn_iter;
struct drm_connector *connector;
int index_mask = 0;
int entry = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
if (type_mask & (1 << gma_encoder->type))
index_mask |= (1 << entry);
entry++;
}
drm_connector_list_iter_end(&conn_iter);
return index_mask;
}


@ -521,13 +521,13 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
*/
void psb_intel_lvds_destroy(struct drm_connector *connector)
{
struct gma_connector *gma_connector = to_gma_connector(connector);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
psb_intel_i2c_destroy(lvds_priv->ddc_bus);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(gma_connector);
}
int psb_intel_lvds_set_property(struct drm_connector *connector,
@ -782,7 +782,6 @@ void psb_intel_lvds_init(struct drm_device *dev,
*/
out:
mutex_unlock(&dev->mode_config.mutex);
drm_connector_register(connector);
return;
failed_find:


@ -1542,9 +1542,10 @@ static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
static void psb_intel_sdvo_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
struct gma_connector *gma_connector = to_gma_connector(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(gma_connector);
}
static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@ -1932,7 +1933,6 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
connector->base.restore = psb_intel_sdvo_restore;
gma_connector_attach_encoder(&connector->base, &encoder->base);
drm_connector_register(&connector->base.base);
}
static void


@ -21,8 +21,7 @@
* inline functions
*/
static inline u32
psb_pipestat(int pipe)
static inline u32 gma_pipestat(int pipe)
{
if (pipe == 0)
return PIPEASTAT;
@ -33,8 +32,7 @@ psb_pipestat(int pipe)
BUG();
}
static inline u32
mid_pipe_event(int pipe)
static inline u32 gma_pipe_event(int pipe)
{
if (pipe == 0)
return _PSB_PIPEA_EVENT_FLAG;
@ -45,20 +43,7 @@ mid_pipe_event(int pipe)
BUG();
}
static inline u32
mid_pipe_vsync(int pipe)
{
if (pipe == 0)
return _PSB_VSYNC_PIPEA_FLAG;
if (pipe == 1)
return _PSB_VSYNC_PIPEB_FLAG;
if (pipe == 2)
return _MDFLD_PIPEC_VBLANK_FLAG;
BUG();
}
static inline u32
mid_pipeconf(int pipe)
static inline u32 gma_pipeconf(int pipe)
{
if (pipe == 0)
return PIPEACONF;
@ -69,11 +54,10 @@ mid_pipeconf(int pipe)
BUG();
}
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
u32 reg = psb_pipestat(pipe);
u32 reg = gma_pipestat(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
if (gma_power_begin(&dev_priv->dev, false)) {
@ -86,11 +70,10 @@ psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
}
}
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = psb_pipestat(pipe);
u32 reg = gma_pipestat(pipe);
dev_priv->pipestat[pipe] &= ~mask;
if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
@ -105,12 +88,12 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
/*
* Display controller interrupt handler for pipe event.
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t pipe_stat_val = 0;
uint32_t pipe_stat_reg = psb_pipestat(pipe);
uint32_t pipe_stat_reg = gma_pipestat(pipe);
uint32_t pipe_enable = dev_priv->pipestat[pipe];
uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
uint32_t pipe_clear;
@ -160,22 +143,22 @@ static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
/*
* Display controller interrupt handler.
*/
static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
{
if (vdc_stat & _PSB_IRQ_ASLE)
psb_intel_opregion_asle_intr(dev);
if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
mid_pipe_event_handler(dev, 0);
gma_pipe_event_handler(dev, 0);
if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
mid_pipe_event_handler(dev, 1);
gma_pipe_event_handler(dev, 1);
}
/*
* SGX interrupt handler
*/
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 val, addr;
@ -222,7 +205,7 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);
}
static irqreturn_t psb_irq_handler(int irq, void *arg)
static irqreturn_t gma_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
@ -246,14 +229,14 @@ static irqreturn_t psb_irq_handler(int irq, void *arg)
spin_unlock(&dev_priv->irqmask_lock);
if (dsp_int && gma_power_is_on(dev)) {
psb_vdc_interrupt(dev, vdc_stat);
gma_vdc_interrupt(dev, vdc_stat);
handled = 1;
}
if (sgx_int) {
sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
handled = 1;
}
@ -274,7 +257,7 @@ static irqreturn_t psb_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
void psb_irq_preinstall(struct drm_device *dev)
void gma_irq_preinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
@ -303,7 +286,7 @@ void psb_irq_preinstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
void psb_irq_postinstall(struct drm_device *dev)
void gma_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
@ -322,9 +305,9 @@ void psb_irq_postinstall(struct drm_device *dev)
for (i = 0; i < dev->num_crtcs; ++i) {
if (dev->vblank[i].enabled)
psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
else
psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
}
if (dev_priv->ops->hotplug_enable)
@ -333,26 +316,26 @@ void psb_irq_postinstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
int psb_irq_install(struct drm_device *dev, unsigned int irq)
int gma_irq_install(struct drm_device *dev, unsigned int irq)
{
int ret;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
psb_irq_preinstall(dev);
gma_irq_preinstall(dev);
/* PCI devices require shared interrupts. */
ret = request_irq(irq, psb_irq_handler, IRQF_SHARED, dev->driver->name, dev);
ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
if (ret)
return ret;
psb_irq_postinstall(dev);
gma_irq_postinstall(dev);
return 0;
}
void psb_irq_uninstall(struct drm_device *dev)
void gma_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
@ -368,7 +351,7 @@ void psb_irq_uninstall(struct drm_device *dev)
for (i = 0; i < dev->num_crtcs; ++i) {
if (dev->vblank[i].enabled)
psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
}
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@ -388,17 +371,14 @@ void psb_irq_uninstall(struct drm_device *dev)
free_irq(pdev->irq, dev);
}
/*
* It is used to enable VBLANK interrupt
*/
int psb_enable_vblank(struct drm_crtc *crtc)
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
uint32_t pipeconf_reg = gma_pipeconf(pipe);
if (gma_power_begin(dev, false)) {
reg_val = REG_READ(pipeconf_reg);
@ -417,17 +397,14 @@ int psb_enable_vblank(struct drm_crtc *crtc)
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
return 0;
}
/*
* It is used to disable VBLANK interrupt
*/
void psb_disable_vblank(struct drm_crtc *crtc)
void gma_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
@ -443,7 +420,7 @@ void psb_disable_vblank(struct drm_crtc *crtc)
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
@ -451,7 +428,7 @@ void psb_disable_vblank(struct drm_crtc *crtc)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
u32 psb_get_vblank_counter(struct drm_crtc *crtc)
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
@ -486,8 +463,8 @@ u32 psb_get_vblank_counter(struct drm_crtc *crtc)
if (!(reg_val & PIPEACONF_ENABLE)) {
dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
pipe);
goto psb_get_vblank_counter_exit;
pipe);
goto err_gma_power_end;
}
/*
@ -506,8 +483,7 @@ u32 psb_get_vblank_counter(struct drm_crtc *crtc)
count = (high1 << 8) | low;
psb_get_vblank_counter_exit:
err_gma_power_end:
gma_power_end(dev);
return count;


@ -15,16 +15,15 @@
struct drm_crtc;
struct drm_device;
bool sysirq_init(struct drm_device *dev);
void sysirq_uninit(struct drm_device *dev);
void gma_irq_preinstall(struct drm_device *dev);
void gma_irq_postinstall(struct drm_device *dev);
int gma_irq_install(struct drm_device *dev, unsigned int irq);
void gma_irq_uninstall(struct drm_device *dev);
void psb_irq_preinstall(struct drm_device *dev);
void psb_irq_postinstall(struct drm_device *dev);
int psb_irq_install(struct drm_device *dev, unsigned int irq);
void psb_irq_uninstall(struct drm_device *dev);
int psb_enable_vblank(struct drm_crtc *crtc);
void psb_disable_vblank(struct drm_crtc *crtc);
u32 psb_get_vblank_counter(struct drm_crtc *crtc);
int gma_crtc_enable_vblank(struct drm_crtc *crtc);
void gma_crtc_disable_vblank(struct drm_crtc *crtc);
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc);
void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
#endif /* _PSB_IRQ_H_ */


@ -108,7 +108,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
trace_i915_gem_object_clflush(obj);
clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC))
if (!(flags & I915_CLFLUSH_SYNC) &&
dma_resv_reserve_fences(obj->base.resv, 1) == 0)
clflush = clflush_work_create(obj);
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,


@ -998,11 +998,9 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
}
}
if (!(ev->flags & EXEC_OBJECT_WRITE)) {
err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
if (err)
return err;
}
err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
if (err)
return err;
GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
eb_vma_misplaced(&eb->exec[i], vma, ev->flags));
@ -2303,7 +2301,7 @@ static int eb_parse(struct i915_execbuffer *eb)
if (IS_ERR(batch))
return PTR_ERR(batch);
err = dma_resv_reserve_shared(shadow->obj->base.resv, 1);
err = dma_resv_reserve_fences(shadow->obj->base.resv, 1);
if (err)
return err;


@ -283,7 +283,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, 0);
if (ret)
goto err_free;
@ -936,7 +936,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
ttm_bo_move_to_lru_tail(bo);
spin_unlock(&bo->bdev->lru_lock);
}


@ -611,7 +611,11 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(src);
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_shared(src_bo->base.resv, 1);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
if (ret)
return ret;
ret = dma_resv_reserve_fences(dst_bo->base.resv, 1);
if (ret)
return ret;


@ -216,7 +216,10 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq);
if (rq) {
dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
dma_resv_add_excl_fence(obj->base.resv,
&rq->fence);
i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq);
}

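The dma-buf related hunks above all follow the same new rule: fence slots must be reserved with dma_resv_reserve_fences() before a fence is added to a reservation object, and there is no longer a separate shared-slot reserve. A minimal sketch of that pattern, with a placeholder helper name and trimmed error handling:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Sketch: reserve a slot first, then publish the fence.
 * The caller is expected to hold the reservation object's lock. */
static int publish_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(resv, fence);
	return 0;
}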

@ -1819,6 +1819,12 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
intel_frontbuffer_put(front);
}
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
if (fence) {
dma_resv_add_excl_fence(vma->obj->base.resv, fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@ -1826,7 +1832,7 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
@ -2044,7 +2050,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
if (!obj->mm.rsgt)
return -EBUSY;
err = dma_resv_reserve_shared(obj->base.resv, 1);
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (err)
return -EBUSY;


@ -1043,6 +1043,13 @@ static int igt_lmem_write_cpu(void *arg)
}
i915_gem_object_lock(obj, NULL);
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (err) {
i915_gem_object_unlock(obj);
goto out_put;
}
/* Put the pages into a known state -- from the gpu for added fun */
intel_engine_pm_get(engine);
err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,


@ -150,10 +150,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
if (imx_ldb_ch->mode_valid) {
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
mode = drm_mode_duplicate(connector->dev, &imx_ldb_ch->mode);
if (!mode)
return -EINVAL;
drm_mode_copy(mode, &imx_ldb_ch->mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
num_modes++;

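The imx-ldb hunk above is one instance of the tree-wide switch to drm_mode_duplicate()/drm_mode_copy(). A rough sketch of the get_modes-style usage, where the fixed mode and the helper name are placeholders:

#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

/* Sketch: duplicate a driver-provided fixed mode and report it as the
 * single probed mode; drm_mode_duplicate() allocates and copies in one go. */
static int report_fixed_mode(struct drm_connector *connector,
			     const struct drm_display_mode *fixed_mode)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, fixed_mode);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;
}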

@ -226,6 +226,18 @@ static int ingenic_drm_update_pixclk(struct notifier_block *nb,
}
}
static void ingenic_drm_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
regmap_write(priv->map, JZ_REG_LCD_STATE, 0);
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_ENABLE | JZ_LCD_CTRL_DISABLE,
JZ_LCD_CTRL_ENABLE);
}
static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@ -237,28 +249,20 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
if (WARN_ON(IS_ERR(priv_state)))
return;
regmap_write(priv->map, JZ_REG_LCD_STATE, 0);
/* Set addresses of our DMA descriptor chains */
next_id = priv_state->use_palette ? HWDESC_PALETTE : 0;
regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_addr(priv, next_id));
regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_addr(priv, 1));
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_ENABLE | JZ_LCD_CTRL_DISABLE,
JZ_LCD_CTRL_ENABLE);
drm_crtc_vblank_on(crtc);
}
static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
static void ingenic_drm_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct ingenic_drm *priv = drm_device_get_priv(bridge->dev);
unsigned int var;
drm_crtc_vblank_off(crtc);
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
JZ_LCD_CTRL_DISABLE, JZ_LCD_CTRL_DISABLE);
@ -267,6 +271,12 @@ static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
1000, 0);
}
static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
drm_crtc_vblank_off(crtc);
}
static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
struct drm_display_mode *mode)
{
@ -968,6 +978,8 @@ static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs =
static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {
.attach = ingenic_drm_bridge_attach,
.atomic_enable = ingenic_drm_bridge_atomic_enable,
.atomic_disable = ingenic_drm_bridge_atomic_disable,
.atomic_check = ingenic_drm_bridge_atomic_check,
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,

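The ingenic hunk above moves controller enable/disable out of the CRTC hooks and into the bridge's atomic callbacks. In outline, wiring those callbacks up looks roughly like this; the function names are illustrative and the bodies stand in for hardware-specific register writes:

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>

static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_bridge_state *old_bridge_state)
{
	/* hardware-specific: start scanout here */
}

static void example_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_bridge_state *old_bridge_state)
{
	/* hardware-specific: stop scanout and wait for the controller to idle */
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.atomic_enable = example_bridge_atomic_enable,
	.atomic_disable = example_bridge_atomic_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};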

@ -257,13 +257,11 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
bool write, bool explicit)
{
int err = 0;
int err;
if (!write) {
err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
if (err)
return err;
}
err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
if (err)
return err;
/* explicit sync use user passed dep fence */
if (explicit)


@ -19,7 +19,6 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@ -39,7 +38,6 @@ struct mcde_dsi {
struct device *dev;
struct mcde *mcde;
struct drm_bridge bridge;
struct drm_panel *panel;
struct drm_bridge *bridge_out;
struct mipi_dsi_host dsi_host;
struct mipi_dsi_device *mdsi;
@ -1073,9 +1071,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct mcde *mcde = to_mcde(drm);
struct mcde_dsi *d = dev_get_drvdata(dev);
struct device_node *child;
struct drm_panel *panel = NULL;
struct drm_bridge *bridge = NULL;
struct drm_bridge *bridge;
if (!of_get_available_child_count(dev->of_node)) {
dev_info(dev, "unused DSI interface\n");
@ -1100,37 +1096,10 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
return PTR_ERR(d->lp_clk);
}
/* Look for a panel as a child to this node */
for_each_available_child_of_node(dev->of_node, child) {
panel = of_drm_find_panel(child);
if (IS_ERR(panel)) {
dev_err(dev, "failed to find panel try bridge (%ld)\n",
PTR_ERR(panel));
panel = NULL;
bridge = of_drm_find_bridge(child);
if (!bridge) {
dev_err(dev, "failed to find bridge\n");
return -EINVAL;
}
}
}
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge)) {
dev_err(dev, "error adding panel bridge\n");
return PTR_ERR(bridge);
}
dev_info(dev, "connected to panel\n");
d->panel = panel;
} else if (bridge) {
/* TODO: AV8100 HDMI encoder goes here for example */
dev_info(dev, "connected to non-panel bridge (unsupported)\n");
return -ENODEV;
} else {
dev_err(dev, "no panel or bridge\n");
return -ENODEV;
bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(bridge)) {
dev_err(dev, "error to get bridge\n");
return PTR_ERR(bridge);
}
d->bridge_out = bridge;
@ -1153,8 +1122,6 @@ static void mcde_dsi_unbind(struct device *dev, struct device *master,
{
struct mcde_dsi *d = dev_get_drvdata(dev);
if (d->panel)
drm_panel_bridge_remove(d->bridge_out);
regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
}

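The mcde_dsi hunk above is one of the devm_drm_of_get_bridge() conversions mentioned in the summary: instead of probing for a panel and wrapping it in a panel-bridge by hand, the helper looks up the given OF port/endpoint and returns a bridge, wrapping a panel automatically if that is what it finds. A compact sketch of the call, with placeholder port/endpoint numbers and helper name:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_bridge.h>

/* Sketch: fetch the downstream bridge at port 0 / endpoint 0 of @dev's
 * OF node; the result is device-managed, so no explicit removal is needed. */
static int example_get_output_bridge(struct device *dev,
				     struct drm_bridge **out)
{
	struct drm_bridge *bridge;

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	*out = bridge;
	return 0;
}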

@ -500,6 +500,18 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
}
if ((dsi->mode_flags & MIPI_DSI_HS_PKT_END_ALIGNED) &&
(dsi->lanes == 4)) {
horizontal_sync_active_byte =
roundup(horizontal_sync_active_byte, dsi->lanes) - 2;
horizontal_frontporch_byte =
roundup(horizontal_frontporch_byte, dsi->lanes) - 2;
horizontal_backporch_byte =
roundup(horizontal_backporch_byte, dsi->lanes) - 2;
horizontal_backporch_byte -=
(vm->hactive * dsi_tmp_buf_bpp + 2) % dsi->lanes;
}
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);


@ -168,7 +168,7 @@ static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
},
.attrs = (const struct soc_device_attribute []) {
{ .soc_id = "GXL (S805*)", },
{ /* sentinel */ },
{ /* sentinel */ }
}
},
};


@ -320,16 +320,14 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
if (!write) {
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = dma_resv_reserve_shared(obj->resv, 1);
if (ret)
return ret;
}
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = dma_resv_reserve_fences(obj->resv, 1);
if (ret)
return ret;
/* exclusive fences must be ordered */
if (no_implicit && !write)

Some files were not shown because too many files have changed in this diff.