Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (44 commits)
  drm/i915: fix ioremap of a user address for non-root (CVE-2008-3831)
  drm: make CONFIG_DRM depend on CONFIG_SHMEM.
  radeon: fix PCI bus mastering support enables.
  radeon: add RS400 family support.
  drm/radeon: add support for RS740 IGP chipsets.
  i915: GM45 has GM965-style MCH setup.
  i915: Don't run retire work handler while suspended
  i915: Map status page cached for chips with GTT-based HWS location.
  i915: Fix up ring initialization to cover G45 oddities
  i915: Use non-reserved status page index for breadcrumb
  drm: Increment dev_priv->irq_received so i915_gem_interrupts count works.
  drm: kill drm_device->irq
  drm: wbinvd is cache coherent.
  i915: add missing return in error path.
  i915: fixup permissions on gem ioctls.
  drm: Clean up many sparse warnings in i915.
  drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
  drm: G33-class hardware has a newer 965-style MCH (no DCC register).
  drm: Avoid oops in GEM execbuffers with bad arguments.
  DRM: Return -EBADF on bad object in flink, and return current name if it exists.
  ...
Author: Linus Torvalds
Date:   2008-10-17 15:09:20 -07:00
Commit: f7ea4a4ba8
49 changed files with 8816 additions and 1966 deletions


@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
return (void*) vaddr;
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
struct page *kmap_atomic_to_page(void *ptr)
{


@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
@@ -87,6 +87,7 @@ config DRM_MGA
config DRM_SIS
tristate "SiS video cards"
depends on DRM && AGP
depends on FB_SIS || FB_SIS=n
help
Choose this option if you have a SiS 630 or compatible video
chipset. If M is selected the module will be called sis. AGP


@@ -4,8 +4,9 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o


@@ -33,6 +33,7 @@
#include "drmP.h"
#include <linux/module.h>
#include <asm/agp.h>
#if __OS_HAS_AGP
@@ -452,4 +453,53 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
#endif /* __OS_HAS_AGP */
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
DRM_AGP_MEM *mem;
int ret, i;
DRM_DEBUG("\n");
mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
num_pages);
return NULL;
}
for (i = 0; i < num_pages; i++)
mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
mem->page_count = num_pages;
mem->is_flushed = true;
ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
return NULL;
}
return mem;
}
EXPORT_SYMBOL(drm_agp_bind_pages);
void drm_agp_chipset_flush(struct drm_device *dev)
{
agp_flush_chipset(dev->agp->bridge);
}
EXPORT_SYMBOL(drm_agp_chipset_flush);
#endif /* __OS_HAS_AGP */
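The two exports above are the hooks the new GEM code relies on: drm_agp_bind_pages() places an object's pages into the aperture at a caller-chosen offset, and drm_agp_chipset_flush() pushes pending chipset writes out to memory. A minimal caller sketch, assuming AGP_USER_MEMORY as the memory type; the helper name and calling context here are illustrative, not part of this patch:

	/* Hedged sketch: bind a set of pages at a GTT offset, then
	 * flush the chipset.  example_bind_object() is hypothetical. */
	static int example_bind_object(struct drm_device *dev,
				       struct page **pages,
				       unsigned long num_pages,
				       uint32_t gtt_offset)
	{
		DRM_AGP_MEM *mem;

		mem = drm_agp_bind_pages(dev, pages, num_pages,
					 gtt_offset, AGP_USER_MEMORY);
		if (mem == NULL)
			return -ENOMEM;

		/* Make sure pending chipset writes reach memory. */
		drm_agp_chipset_flush(dev);
		return 0;
	}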


@@ -0,0 +1,69 @@
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#if defined(CONFIG_X86)
static void
drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page, KM_USER0);
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual, KM_USER0);
}
#endif
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
unsigned long i;
mb();
for (i = 0; i < num_pages; ++i)
drm_clflush_page(*pages++);
mb();
return;
}
wbinvd();
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
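drm_clflush_pages() gives drivers a portable way to make CPU-written pages visible to a non-coherent GPU: per-cacheline clflush when the CPU supports it, wbinvd otherwise. A hedged sketch of a call site; the object layout is illustrative only:

	/* Flush an object's backing pages before GPU access; 'pages'
	 * would come from the driver's own object bookkeeping. */
	static void example_make_coherent(struct drm_gem_object *obj,
					  struct page **pages)
	{
		drm_clflush_pages(pages, obj->size >> PAGE_SHIFT);
	}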


@@ -116,7 +116,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )


@@ -246,7 +246,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current->euid;
priv->uid = current_euid();
priv->pid = task_pid_nr(current);
priv->minor = idr_find(&drm_minors_idr, minor_id);
priv->ioctl_count = 0;
@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
dev->driver->reclaim_buffers(dev, file_priv);
}
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);

drivers/gpu/drm/drm_gem.c (new file, 421 lines)

@@ -0,0 +1,421 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"
/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
*
* Because various devices have different requirements in terms of
* synchronization and migration strategies, implementing that is left up to
* the driver, and all that the general API provides should be generic --
* allocating objects, reading/writing data with the cpu, freeing objects.
* Even there, platform-dependent optimizations for reading/writing data with
* the CPU mean we'll likely hook those out to driver-specific calls. However,
* the DRI2 implementation wants to have at least allocate/mmap be generic.
*
* The goal was to have swap-backed object allocation managed through
* struct file. However, file descriptors as handles to a struct file have
* two major failings:
* - Process limits prevent more than 1024 or so being used at a time by
* default.
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
* This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
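To make the handle/name split described above concrete, here is a hedged userspace sketch of sharing an object between two clients with the flink and open ioctls added by this series (error handling trimmed; the function name is illustrative):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "drm.h"		/* libdrm ioctl definitions */

	/* Share the object behind 'handle1' (owned by fd1) with fd2.
	 * Returns an fd2-local handle, or 0 on failure.  Sketch only. */
	static uint32_t share_gem_object(int fd1, int fd2, uint32_t handle1)
	{
		struct drm_gem_flink flink = { .handle = handle1 };
		struct drm_gem_open op = { 0 };

		if (ioctl(fd1, DRM_IOCTL_GEM_FLINK, &flink))
			return 0;

		op.name = flink.name;	/* global name -> local handle */
		if (ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op))
			return 0;

		return op.handle;	/* op.size holds the object size */
	}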
/**
* Initialize the GEM device fields
*/
int
drm_gem_init(struct drm_device *dev)
{
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
atomic_set(&dev->object_count, 0);
atomic_set(&dev->object_memory, 0);
atomic_set(&dev->pin_count, 0);
atomic_set(&dev->pin_memory, 0);
atomic_set(&dev->gtt_count, 0);
atomic_set(&dev->gtt_memory, 0);
return 0;
}
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, 0);
if (IS_ERR(obj->filp)) {
kfree(obj);
return NULL;
}
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
fput(obj->filp);
kfree(obj);
return NULL;
}
atomic_inc(&dev->object_count);
atomic_add(obj->size, &dev->object_memory);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
/* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
* could race us to double-decrement the refcount and cause a
* use-after-free later. Given the frequency of our handle lookups,
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return -EINVAL;
}
dev = obj->dev;
/* Release reference and decrement refcount. */
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
mutex_lock(&dev->struct_mutex);
drm_gem_object_handle_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep)
{
int ret;
/*
* Get the user-visible handle using idr.
*/
again:
/* ensure there is space available to allocate a handle */
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
return -ENOMEM;
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
if (ret != 0)
return ret;
drm_gem_object_handle_reference(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
int handle)
{
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return NULL;
}
drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
* Releases the handle to an mm object.
*/
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args = data;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
ret = drm_gem_handle_delete(file_priv, args->handle);
return ret;
}
/**
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
return -ENOMEM;
spin_lock(&dev->object_name_lock);
if (obj->name) {
args->name = obj->name;
spin_unlock(&dev->object_name_lock);
return 0;
}
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);
spin_unlock(&dev->object_name_lock);
if (ret == -EAGAIN)
goto again;
if (ret != 0) {
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Leave the reference from the lookup around as the
* name table now holds one
*/
args->name = (uint64_t) obj->name;
return 0;
}
/**
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
int handle;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
spin_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
if (!obj)
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
args->handle = handle;
args->size = obj->size;
return 0;
}
/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_init(&file_private->object_idr);
spin_lock_init(&file_private->table_lock);
}
/**
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
drm_gem_object_handle_unreference(obj);
return 0;
}
/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
idr_destroy(&file_private->object_idr);
mutex_unlock(&dev->struct_mutex);
}
/**
* Called after the last reference to the object has been lost.
*
* Frees the object
*/
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
void
drm_gem_object_handle_free(struct kref *kref)
{
struct drm_gem_object *obj = container_of(kref,
struct drm_gem_object,
handlecount);
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
*/
drm_gem_object_unreference(obj);
} else
spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
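drm_gem_object_alloc() and drm_gem_object_free() above call optional per-driver hooks. A hedged sketch of the pair a driver might register; the private struct is hypothetical, not part of this patch:

	/* Hypothetical per-object driver state. */
	struct example_obj_priv {
		int dirty;
	};

	static int example_gem_init_object(struct drm_gem_object *obj)
	{
		obj->driver_private = kzalloc(sizeof(struct example_obj_priv),
					      GFP_KERNEL);
		return obj->driver_private ? 0 : -ENOMEM;
	}

	static void example_gem_free_object(struct drm_gem_object *obj)
	{
		kfree(obj->driver_private);	/* core then fput()s obj->filp */
	}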


@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->irq;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -71,25 +71,137 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
unsigned long irqflags;
int i;
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
dev->last_vblank[i] =
dev->driver->get_vblank_counter(dev, i);
dev->driver->disable_vblank(dev, i);
dev->vblank_enabled[i] = 0;
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
static void drm_vblank_cleanup(struct drm_device *dev)
{
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
del_timer(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
dev->num_crtcs, DRM_MEM_DRIVER);
dev->num_crtcs = 0;
}
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vbl_queue)
goto err;
dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vbl_sigs)
goto err;
dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->_vblank_count)
goto err;
dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vblank_refcount)
goto err;
dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_enabled)
goto err;
dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
if (!dev->last_vblank)
goto err;
dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_inmodeset)
goto err;
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
init_waitqueue_head(&dev->vbl_queue[i]);
INIT_LIST_HEAD(&dev->vbl_sigs[i]);
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
dev->vblank_disable_allowed = 0;
return 0;
err:
drm_vblank_cleanup(dev);
return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
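drm_vblank_init() is intended to be called from the driver's load hook once the CRTC count is known; the driver must also supply the get_vblank_counter, enable_vblank and disable_vblank callbacks used above. A hedged sketch on an assumed two-pipe device:

	/* Sketch of a driver load path using the per-CRTC vblank core.
	 * dev->driver->get_vblank_counter / enable_vblank /
	 * disable_vblank are assumed to be set in struct drm_driver. */
	static int example_driver_load(struct drm_device *dev,
				       unsigned long flags)
	{
		return drm_vblank_init(dev, 2);	/* e.g. two display pipes */
	}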
/**
* Install IRQ handler.
*
* \param dev DRM device.
* \param irq IRQ number.
*
* Initializes the IRQ related data, and sets up drm_device::vbl_queue. Installs the handler, calling the driver
* Initializes the IRQ related data. Installs the handler, calling the driver
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation.
*/
static int drm_irq_install(struct drm_device * dev)
int drm_irq_install(struct drm_device *dev)
{
int ret;
int ret = 0;
unsigned long sh_flags = 0;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if (dev->irq == 0)
if (dev->pdev->irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -107,18 +219,7 @@ static int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init(&dev->vbl_lock);
INIT_LIST_HEAD(&dev->vbl_sigs);
INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -127,8 +228,9 @@ static int drm_irq_install(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
ret = request_irq(dev->irq, dev->driver->irq_handler,
ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
@@ -137,10 +239,16 @@ static int drm_irq_install(struct drm_device * dev)
}
/* After installing handler */
dev->driver->irq_postinstall(dev);
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
mutex_unlock(&dev->struct_mutex);
}
return 0;
return ret;
}
EXPORT_SYMBOL(drm_irq_install);
/**
* Uninstall the IRQ handler.
@@ -164,17 +272,18 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("irq=%d\n", dev->irq);
DRM_DEBUG("irq=%d\n", dev->pdev->irq);
dev->driver->irq_uninstall(dev);
free_irq(dev->irq, dev);
free_irq(dev->pdev->irq, dev);
drm_vblank_cleanup(dev);
dev->locked_tasklet_func = NULL;
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
/**
@@ -201,7 +310,7 @@ int drm_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->irq)
ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -213,6 +322,174 @@ int drm_control(struct drm_device *dev, void *data,
}
}
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @crtc: which counter to retrieve
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
return atomic_read(&dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_vblank_count);
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
*
* Call back into the driver to update the appropriate vblank counter
* (specified by @crtc). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
* call if necessary.
*
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
u32 cur_vblank, diff;
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
*/
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->last_vblank[crtc], cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
atomic_add(diff, &dev->_vblank_count[crtc]);
}
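A worked instance of the wrap handling above, assuming a hypothetical 24-bit hardware counter:

	/*
	 * dev->max_vblank_count  = 0x01000000 (24-bit counter)
	 * dev->last_vblank[crtc] = 0x00fffff0
	 * cur_vblank             = 0x00000010 (counter wrapped)
	 *
	 * diff = 0x00000010 - 0x00fffff0 = 0xff000020   (u32 arithmetic)
	 * cur_vblank < last_vblank, so diff += 0x01000000 -> 0x00000020
	 *
	 * i.e. 32 vblanks elapsed across the wrap.
	 */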
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
* @crtc: which CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* RETURNS
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
!dev->vblank_enabled[crtc]) {
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
else {
dev->vblank_enabled[crtc] = 1;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible.
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
}
EXPORT_SYMBOL(drm_vblank_put);
/**
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
* Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
* ioctls around modesetting so that any lost vblank events are accounted for.
*
* Generally the counter will reset across mode sets. If interrupts are
* enabled around this call, we don't have to do anything since the counter
* will have already been incremented.
*/
int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned long irqflags;
int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
goto out;
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs) {
ret = -EINVAL;
goto out;
}
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 1;
drm_vblank_get(dev, crtc);
}
break;
case _DRM_POST_MODESET:
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
dev->vblank_inmodeset[crtc] = 0;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
drm_vblank_put(dev, crtc);
}
break;
default:
ret = -EINVAL;
break;
}
out:
return ret;
}
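From userspace the bracketing described above looks like the following hedged sketch (struct drm_modeset_ctl as added to drm.h by this series; the function name is illustrative):

	#include <sys/ioctl.h>
	#include "drm.h"		/* libdrm ioctl definitions */

	/* Bracket a mode switch so lost vblanks are accounted for. */
	static void example_set_mode(int fd, unsigned int crtc)
	{
		struct drm_modeset_ctl ctl = {
			.crtc = crtc,
			.cmd  = _DRM_PRE_MODESET,
		};

		ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
		/* ... program the new mode here ... */
		ctl.cmd = _DRM_POST_MODESET;
		ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
	}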
/**
* Wait for VBLANK.
*
@@ -232,14 +509,14 @@ int drm_control(struct drm_device *dev, void *data,
*
* If a signal is not requested, then calls vblank_wait().
*/
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
struct timeval now;
int ret = 0;
unsigned int flags, seq;
unsigned int flags, seq, crtc;
if ((!dev->irq) || (!dev->irq_enabled))
if ((!dev->pdev->irq) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type &
@@ -251,13 +528,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
if (crtc >= dev->num_crtcs)
return -EINVAL;
seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
: &dev->vbl_received);
ret = drm_vblank_get(dev, crtc);
if (ret) {
DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
return ret;
}
seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
@@ -266,7 +547,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
case _DRM_VBLANK_ABSOLUTE:
break;
default:
return -EINVAL;
ret = -EINVAL;
goto done;
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -276,8 +558,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
struct drm_vbl_sig *vbl_sig;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +579,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
}
if (dev->vbl_pending >= 100) {
if (atomic_read(&dev->vbl_signal_pending) >= 100) {
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return -EBUSY;
ret = -EBUSY;
goto done;
}
dev->vbl_pending++;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (!
(vbl_sig =
drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
return -ENOMEM;
vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
DRM_MEM_DRIVER);
if (!vbl_sig) {
ret = -ENOMEM;
goto done;
}
memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
ret = drm_vblank_get(dev, crtc);
if (ret) {
drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
DRM_MEM_DRIVER);
return ret;
}
atomic_inc(&dev->vbl_signal_pending);
vbl_sig->sequence = vblwait->request.sequence;
vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,20 +615,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
vblwait->reply.sequence = seq;
} else {
if (flags & _DRM_VBLANK_SECONDARY) {
if (dev->driver->vblank_wait2)
ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
} else if (dev->driver->vblank_wait)
ret =
dev->driver->vblank_wait(dev,
&vblwait->request.sequence);
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
((drm_vblank_count(dev, crtc)
- vblwait->request.sequence) <= (1 << 23)));
do_gettimeofday(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
if (ret != -EINTR) {
struct timeval now;
do_gettimeofday(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
DRM_DEBUG("vblank wait interrupted by signal\n");
}
}
done:
drm_vblank_put(dev, crtc);
return ret;
}
@@ -348,44 +645,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
* Send the VBLANK signals.
*
* \param dev DRM device.
* \param crtc CRTC where the vblank event occurred
*
* Sends a signal for each task in drm_device::vbl_sigs and empties the list.
*
* If a signal is not requested, then calls vblank_wait().
*/
void drm_vbl_send_signals(struct drm_device * dev)
static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
struct drm_vbl_sig *vbl_sig, *tmp;
struct list_head *vbl_sigs;
unsigned int vbl_seq;
unsigned long flags;
int i;
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
struct drm_vbl_sig *vbl_sig, *tmp;
struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
vbl_sigs = &dev->vbl_sigs[crtc];
vbl_seq = drm_vblank_count(dev, crtc);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_del(&vbl_sig->head);
list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
dev->vbl_pending--;
}
}
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
atomic_dec(&dev->vbl_signal_pending);
drm_vblank_put(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
EXPORT_SYMBOL(drm_vbl_send_signals);
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
* @crtc: where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
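The expected call site for drm_handle_vblank() is the driver's interrupt handler, once per CRTC whose vblank fired. A hedged sketch; the status-read helper and bit names are hypothetical:

	static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
	{
		struct drm_device *dev = (struct drm_device *) arg;
		u32 status = example_read_and_ack_irq(dev); /* hypothetical */

		if (status & EXAMPLE_PIPEA_VBLANK)	/* hypothetical bit */
			drm_handle_vblank(dev, 0);
		if (status & EXAMPLE_PIPEB_VBLANK)	/* hypothetical bit */
			drm_handle_vblank(dev, 1);

		return IRQ_HANDLED;
	}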
/**
* Tasklet wrapper function.


@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,


@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
return child;
}
EXPORT_SYMBOL(drm_mm_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
drm_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
EXPORT_SYMBOL(drm_mm_put_block);
struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
unsigned long size,
@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_search_free);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
return drm_mm_create_tail_node(mm, start, size);
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{


@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
static struct drm_proc_list {
const char *name; /**< file name */
int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
u32 driver_features; /**< Required driver features for this entry */
} drm_proc_list[] = {
{"name", drm_name_info},
{"mem", drm_mem_info},
{"vm", drm_vm_info},
{"clients", drm_clients_info},
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"name", drm_name_info, 0},
{"mem", drm_mem_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@@ -90,8 +97,9 @@ static struct drm_proc_list {
int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
int i, j;
int i, j, ret;
char name[64];
sprintf(name, "%d", minor_id);
@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
u32 features = drm_proc_list[i].driver_features;
if (features != 0 &&
(dev->driver->driver_features & features) != features)
continue;
ent = create_proc_entry(drm_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
name, drm_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[i].name,
minor->dev_root);
remove_proc_entry(name, root);
minor->dev_root = NULL;
return -1;
ret = -1;
goto fail;
}
ent->read_proc = drm_proc_list[i].f;
ent->data = minor;
}
if (dev->driver->proc_init) {
ret = dev->driver->proc_init(minor);
if (ret) {
DRM_ERROR("DRM: Driver failed to initialize "
"/proc/dri.\n");
goto fail;
}
}
return 0;
fail:
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[i].name,
minor->dev_root);
remove_proc_entry(name, root);
minor->dev_root = NULL;
return ret;
}
/**
@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
*/
int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
int i;
char name[64];
if (!root || !minor->dev_root)
return 0;
if (dev->driver->proc_cleanup)
dev->driver->proc_cleanup(minor);
for (i = 0; i < DRM_PROC_ENTRIES; i++)
remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
sprintf(name, "%d", minor->index);
@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
return ret;
}
struct drm_gem_name_info_data {
int len;
char *buf;
int eof;
};
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct drm_gem_name_info_data *nid = data;
DRM_INFO("name %d size %d\n", obj->name, obj->size);
if (nid->eof)
return 0;
nid->len += sprintf(&nid->buf[nid->len],
"%6d%9d%8d%9d\n",
obj->name, obj->size,
atomic_read(&obj->handlecount.refcount),
atomic_read(&obj->refcount.refcount));
if (nid->len > DRM_PROC_LIMIT) {
nid->eof = 1;
return 0;
}
return 0;
}
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
struct drm_gem_name_info_data nid;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
nid.len = sprintf(buf, " name size handles refcount\n");
nid.buf = buf;
nid.eof = 0;
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
*start = &buf[offset];
*eof = 0;
if (nid.len > request + offset)
return request;
*eof = 1;
return nid.len - offset;
}
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,


@@ -107,7 +107,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
dev->irq = pdev->irq;
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
@@ -152,6 +151,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
}
}
return 0;
error_out_unreg:
@@ -317,6 +325,7 @@ int drm_put_dev(struct drm_device * dev)
int drm_put_minor(struct drm_minor **minor_p)
{
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
if (minor->type == DRM_MINOR_LEGACY)


@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
device_remove_file(&minor->kdev, &device_attrs[i]);
device_remove_file(&minor->kdev, &device_attrs[j]);
device_unregister(&minor->kdev);
err_out:


@@ -3,7 +3,12 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_proc.o \
i915_gem_tiling.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o


@@ -40,40 +40,96 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
u32 acthd;
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
for (i = 0; i < 10000; i++) {
ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->space >= n)
return 0;
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (ring->head != last_head)
i = 0;
if (acthd != last_acthd)
i = 0;
last_head = ring->head;
last_acthd = acthd;
msleep_interruptible(10);
}
return -EBUSY;
}
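The rewritten wait loop above only charges iterations against the budget while neither the ring head nor ACTHD moves, so the 100000-iteration limit bounds time without progress rather than total wait time. The same idiom in isolation, as a hedged sketch with hypothetical callbacks:

	static int example_wait_with_progress(int (*check)(void),
					      int (*progressed)(void))
	{
		int i;

		for (i = 0; i < 100000; i++) {
			if (check())
				return 0;
			if (progressed())
				i = 0;	/* progress observed: reset budget */
			msleep_interruptible(10);
		}
		return -EBUSY;		/* no progress for the whole budget */
	}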
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
*/
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
}
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
/* Need to rewrite hardware status page */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->head == ring->tail)
if (ring->head == ring->tail && dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
@@ -84,28 +140,19 @@ static int i915_dma_cleanup(struct drm_device * dev)
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev_priv->ring.virtual_start) {
drm_core_ioremapfree(&dev_priv->ring.map, dev);
dev_priv->ring.virtual_start = 0;
dev_priv->ring.map.handle = 0;
dev_priv->ring.virtual_start = NULL;
dev_priv->ring.map.handle = NULL;
dev_priv->ring.map.size = 0;
}
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x2080, 0x1ffff000);
}
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
return 0;
}
@@ -121,34 +168,34 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
dev_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (init->ring_size != 0) {
if (dev_priv->ring.ring_obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
drm_core_ioremap(&dev_priv->ring.map, dev);
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -159,34 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* We are using separate values as placeholders for mechanisms for
* private backbuffer/depthbuffer usage.
*/
dev_priv->use_mi_batchbuffer_start = 0;
if (IS_I965G(dev)) /* 965 doesn't support older method */
dev_priv->use_mi_batchbuffer_start = 1;
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
/* Program Hardware Status Page */
if (!I915_NEED_GFX_HWS(dev)) {
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
DRM_DEBUG("Enabled hardware status page\n");
return 0;
}
@@ -201,11 +224,6 @@ static int i915_dma_resume(struct drm_device * dev)
return -EINVAL;
}
if (!dev_priv->mmio_map) {
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -220,9 +238,9 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
if (dev_priv->status_gfx_addr != 0)
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
else
I915_WRITE(0x02080, dev_priv->dma_status_page);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
@@ -367,9 +385,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
return 0;
}
static int i915_emit_box(struct drm_device * dev,
struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4)
int
i915_emit_box(struct drm_device *dev,
struct drm_clip_rect __user *boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box;
@@ -415,14 +434,15 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
dev_priv->counter = 0;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -486,7 +506,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return ret;
}
if (dev_priv->use_mi_batchbuffer_start) {
if (!IS_I830(dev) && !IS_845G(dev)) {
BEGIN_LP_RING(2);
if (IS_I965G(dev)) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@@ -516,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->current_page,
@@ -524,7 +547,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
i915_kernel_lost_context(dev);
BEGIN_LP_RING(2);
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -549,8 +572,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -570,9 +593,15 @@ static int i915_quiescent(struct drm_device * dev)
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
LOCK_TEST_WITH_RETURN(dev, file_priv);
int ret;
return i915_quiescent(dev);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
ret = i915_quiescent(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -593,16 +622,19 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch);
mutex_unlock(&dev->struct_mutex);
sarea_priv->last_dispatch = (int)hw_status[5];
if (sarea_priv)
sarea_priv->last_dispatch = (int)hw_status[5];
return ret;
}
@@ -619,7 +651,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects &&
DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -629,24 +661,33 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -EFAULT;
}
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
return ret;
}
sarea_priv->last_dispatch = (int)hw_status[5];
if (sarea_priv)
sarea_priv->last_dispatch = (int)hw_status[5];
return 0;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
DRM_DEBUG("%s\n", __func__);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
return i915_dispatch_flip(dev);
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_flip(dev);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
@@ -663,7 +704,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->irq ? 1 : 0;
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -671,6 +712,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
return -EINVAL;
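The new I915_PARAM_HAS_GEM parameter lets userspace probe for GEM support before using the new ioctls. A hedged sketch:

	#include <sys/ioctl.h>
	#include "i915_drm.h"		/* libdrm i915 definitions */

	/* Returns nonzero if the running kernel exposes GEM. */
	static int example_has_gem(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_GEM,
			.value = &value,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* older kernel: parameter unknown */
		return value;
	}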
@@ -697,8 +744,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
if (!IS_I965G(dev))
dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
@@ -749,8 +794,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
dev_priv->hw_status_page = dev_priv->hws_map.handle;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
dev_priv->status_gfx_addr);
DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
return 0;
@@ -776,14 +821,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(drm_i915_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
/* Add register map (needed for suspend/resume) */
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER,
&dev_priv->mmio_map);
dev_priv->regs = ioremap(base, size);
i915_gem_load(dev);
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret != 0)
return ret;
}
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev))
if (pci_enable_msi(dev->pdev))
DRM_ERROR("failed to enable MSI\n");
intel_opregion_init(dev);
spin_lock_init(&dev_priv->user_irq_lock);
return ret;
}
@@ -791,8 +860,15 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mmio_map)
drm_rmmap(dev, dev_priv->mmio_map);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
i915_free_hws(dev);
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
intel_opregion_free(dev);
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
@@ -800,6 +876,25 @@ int i915_driver_unload(struct drm_device *dev)
return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv;
DRM_DEBUG("\n");
i915_file_priv = (struct drm_i915_file_private *)
drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
if (!i915_file_priv)
return -ENOMEM;
file_priv->driver_priv = i915_file_priv;
i915_file_priv->mm.last_gem_seqno = 0;
i915_file_priv->mm.last_gem_throttle_seqno = 0;
return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -807,6 +902,8 @@ void i915_driver_lastclose(struct drm_device * dev)
if (!dev_priv)
return;
i915_gem_lastclose(dev);
if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
@@ -819,6 +916,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -836,7 +940,23 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

View file

@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
enum pipe {
PIPE_A = 0,
PIPE_B,
};
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pipe == PIPE_A)
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
else
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
{
outb(reg, index_port);
return inb(data_port);
}
static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
{
inb(st01);
outb(palette_enable | reg, VGA_AR_INDEX);
return inb(VGA_AR_DATA_READ);
}
static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
{
inb(st01);
outb(palette_enable | reg, VGA_AR_INDEX);
outb(val, VGA_AR_DATA_WRITE);
}
static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
{
outb(reg, index_port);
outb(val, data_port);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
dev_priv->saveDACMASK = inb(VGA_DACMASK);
/* DACCRX automatically increments during read */
outb(0, VGA_DACRX);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = inb(VGA_MSR_READ);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
i915_write_indexed(cr_index, cr_data, 0x11,
i915_read_indexed(cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->saveCR[i] =
i915_read_indexed(cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
inb(st01);
dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
inb(st01);
outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
inb(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->saveGR[i] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->saveGR[0x10] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->saveGR[0x11] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->saveGR[0x18] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->saveSR[i] =
i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* MSR bits */
outb(dev_priv->saveMSR, VGA_MSR_WRITE);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->saveGR[i]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->saveGR[0x10]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->saveGR[0x11]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->saveGR[0x18]);
/* Attribute controller registers */
inb(st01);
for (i = 0; i <= 0x14; i++)
i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
inb(st01); /* switch back to index mode */
outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
inb(st01);
/* VGA color palette registers */
outb(dev_priv->saveDACMASK, VGA_DACMASK);
/* DACCRX automatically increments during read */
outb(0, VGA_DACWX);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
}
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (!dev || !dev_priv) {
printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
@@ -254,122 +52,10 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
pci_save_state(dev->pdev);
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
i915_save_state(dev);
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
if (IS_I965G(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPABASE = I915_READ(DSPABASE);
if (IS_I965G(dev)) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
if (IS_I965G(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
/* LVDS state */
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (IS_I965G(dev))
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
if (!IS_I830(dev) && !IS_845G(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
/* FIXME: save TV & SDVO state */
/* FBC state */
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Interrupt state */
dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
/* VGA state */
dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
i915_save_vga(dev);
intel_opregion_free(dev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
@@ -382,155 +68,15 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
static int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
i915_restore_state(dev);
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
udelay(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
udelay(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPABASE, I915_READ(DSPABASE));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
udelay(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
udelay(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
udelay(150);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
intel_opregion_init(dev);
return 0;
}
@@ -541,17 +87,19 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
DRIVER_IRQ_VBL2,
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.vblank_wait = i915_driver_vblank_wait,
.vblank_wait2 = i915_driver_vblank_wait2,
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -559,6 +107,10 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.proc_init = i915_gem_proc_init,
.proc_cleanup = i915_gem_proc_cleanup,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,201 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if WATCH_INACTIVE
void
i915_verify_inactive(struct drm_device *dev, char *file, int line)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
obj = obj_priv->obj;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
obj,
obj_priv->pin_count, obj_priv->active,
obj->write_domain, file, line);
}
}
#endif /* WATCH_INACTIVE */
#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
{
uint32_t *mem = kmap_atomic(page, KM_USER0);
int i;
for (i = start; i < end; i += 4)
DRM_INFO("%08x: %08x%s\n",
(int) (bias + i), mem[i / 4],
(bias + i == mark) ? " ********" : "");
kunmap_atomic(mem, KM_USER0);
/* give syslog time to catch up */
msleep(1);
}
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
page_len = len - page * PAGE_SIZE;
if (page_len > PAGE_SIZE)
page_len = PAGE_SIZE;
for (chunk = 0; chunk < page_len; chunk += 128) {
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj_priv->page_list[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
mark);
}
}
}
#endif
#if WATCH_LRU
void
i915_dump_lru(struct drm_device *dev, const char *where)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
DRM_INFO("active list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
DRM_INFO("flushing list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
DRM_INFO("inactive %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
}
#endif
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
obj->size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
}
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
goto out;
}
for (i = 0; i < PAGE_SIZE / 4; i++) {
uint32_t cpuval = backing_map[i];
uint32_t gttval = readl(gtt_mapping +
page * 1024 + i);
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj_priv->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
DRM_INFO("...\n");
goto out;
}
}
}
kunmap_atomic(backing_map, KM_USER0);
backing_map = NULL;
}
out:
if (backing_map != NULL)
kunmap_atomic(backing_map, KM_USER0);
iounmap(gtt_mapping);
/* give syslog time to catch up */
msleep(1);
/* Directly flush the object, since we just loaded values with the CPU
* from the backing pages and we don't want to disturb the cache
* management that we're trying to observe.
*/
i915_gem_clflush_object(obj);
}
#endif

View file

@@ -0,0 +1,292 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
static int i915_gem_active_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Active:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n",
obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Flushing:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Inactive:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_request_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Request:\n");
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
DRM_PROC_PRINT(" %d @ %d %08x\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies),
gem_request->flush_domains);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_interrupt_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Interrupt enable: %08x\n",
I915_READ(IER));
DRM_PROC_PRINT("Interrupt identity: %08x\n",
I915_READ(IIR));
DRM_PROC_PRINT("Interrupt mask: %08x\n",
I915_READ(IMR));
DRM_PROC_PRINT("Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
DRM_PROC_PRINT("Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
DRM_PROC_PRINT("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
DRM_PROC_PRINT("Current sequence: %d\n",
i915_get_gem_seqno(dev));
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static struct drm_proc_list {
/** file name */
const char *name;
/** proc callback*/
int (*f) (char *, char **, off_t, int, int *, void *);
} i915_gem_proc_list[] = {
{"i915_gem_active", i915_gem_active_info},
{"i915_gem_flushing", i915_gem_flushing_info},
{"i915_gem_inactive", i915_gem_inactive_info},
{"i915_gem_request", i915_gem_request_info},
{"i915_gem_seqno", i915_gem_seqno_info},
{"i915_gem_interrupt", i915_interrupt_info},
};
#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
int i915_gem_proc_init(struct drm_minor *minor)
{
struct proc_dir_entry *ent;
int i, j;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
ent = create_proc_entry(i915_gem_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/.../%s\n",
i915_gem_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(i915_gem_proc_list[j].name,
minor->dev_root);
return -1;
}
ent->read_proc = i915_gem_proc_list[i].f;
ent->data = minor;
}
return 0;
}
void i915_gem_proc_cleanup(struct drm_minor *minor)
{
int i;
if (!minor->dev_root)
return;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
}
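These nodes read like any other procfs file once the driver is loaded. A minimal userspace sketch of consuming one of them (illustrative, not part of this patch; the /proc/dri/0 path assumes the i915 device is DRM minor 0):

#include <stdio.h>

/* Hypothetical reader for the i915_gem_seqno node registered above. */
int dump_gem_seqno(void)
{
	char line[128];
	FILE *f = fopen("/proc/dri/0/i915_gem_seqno", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}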

View file

@@ -0,0 +1,257 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/** @file i915_gem_tiling.c
*
* Support for managing tiling state of buffer objects.
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvements from doing this on the back/depth buffers are on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what address
* swizzling it needs to do, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
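As a concrete illustration of the addressing math described above (a sketch, not part of this patch, and the helper name is hypothetical): for the common 9_10 swizzle mode, fixing up a byte offset on the CPU side means XORing bits 9 and 10 of the offset into bit 6.

/* Sketch: apply I915_BIT_6_SWIZZLE_9_10 to a byte offset within an
 * object. Bit 9 reaches bit 6 via a shift right by 3 and bit 10 via a
 * shift right by 4; their XOR decides whether bit 6 flips. */
static inline unsigned long swizzle_offset_9_10(unsigned long offset)
{
	return offset ^ (((offset >> 3) ^ (offset >> 4)) & (1UL << 6));
}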
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (!IS_I9XX(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
IS_GM45(dev)) {
uint32_t dcc;
/* On 915-945 and GM965, channel interleave by the CPU is
* determined by DCC. The CPU will alternate based on bit 6
* in interleaved mode, and the GPU will then also alternate
* on bit 6, 9, and 10 for X, but the CPU may also optionally
* alternate based on bit 17 (XOR not disabled and XOR
* bit == 17).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (IS_I915G(dev) || IS_I915GM(dev) ||
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_I965GM(dev) || IS_GM45(dev)) {
/* GM965 only does bit 11-based channel
* randomization
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 or perhaps other swizzling */
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
break;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
*    slot fill          memory size   swizzling
* 0A   0B   1A   1B     1-ch   2-ch
* 512  0    0    0      512    0      O
* 512  0    512  0      16     1008   X
* 512  0    0    512    16     1008   X
* 0    512  0    512    16     1008   X
* 1024 1024 1024 0      2048   1024   O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*/
if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode == I915_TILING_NONE) {
obj_priv->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
/* If we can't handle the swizzling, make it untiled. */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
}
}
obj_priv->tiling_mode = args->tiling_mode;
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return 0;
}
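For context, userspace would drive this ioctl roughly as follows (an illustrative sketch, not part of the diff: the wrapper name is invented, error handling is minimal, and the struct and ioctl number are the ones this series adds to i915_drm.h):

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Hypothetical helper: request X tiling for a GEM object and return
 * the bit 6 swizzling the CPU must apply when touching its pages. */
int set_x_tiling(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_tiling arg = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) != 0)
		return -1;
	/* The kernel may have demoted the request to I915_TILING_NONE
	 * if the swizzle mode was unknown (see the code above). */
	return arg.swizzle_mode;
}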
/**
* Returns the current tiling mode and required bit 6 swizzling for the object.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj_priv->tiling_mode;
switch (obj_priv->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
default:
DRM_ERROR("unknown tiling mode\n");
}
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return 0;
}

View file

@@ -31,12 +31,92 @@
#include "i915_drm.h"
#include "i915_drv.h"
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)
#define MAX_NOPID ((u32)~0)
/** These are the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
I915_ASLE_INTERRUPT | \
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != 0) {
dev_priv->irq_mask_reg &= ~mask;
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
}
static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask_reg & mask) != mask) {
dev_priv->irq_mask_reg |= mask;
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
}
/**
* i915_get_pipe - return the pipe associated with a given plane
* @dev: DRM device
* @plane: plane to look for
*
* The Intel Mesa & 2D drivers call the vblank routines with a plane number
* rather than a pipe number, since they may not always be equal. This routine
* maps the given @plane back to a pipe number.
*/
static int
i915_get_pipe(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 dspcntr;
dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
}
/**
* i915_get_plane - return the plane associated with a given pipe
* @dev: DRM device
* @pipe: pipe to look for
*
* The Intel Mesa & 2D drivers call the vblank routines with a plane number
* rather than a pipe number, since they may not always be equal. This routine
* maps the given @pipe back to a plane number.
*/
static int
i915_get_plane(struct drm_device *dev, int pipe)
{
if (i915_get_pipe(dev, 0) == pipe)
return 0;
return 1;
}
/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
*
* Reading certain registers when the pipe is disabled can hang the chip.
* Use this routine to make sure the PLL is running and the pipe is active
* before reading such registers if unsure.
*/
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
return 1;
return 0;
}
/**
* Emit blits for scheduled buffer swaps.
*
@@ -48,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
unsigned long irqflags;
struct list_head *list, *tmp, hits, *hit;
int nhits, nrects, slice[2], upper[2], lower[2], i;
unsigned counter[2] = { atomic_read(&dev->vbl_received),
atomic_read(&dev->vbl_received2) };
unsigned counter[2];
struct drm_drawable_info *drw;
drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
u32 cpp = dev_priv->cpp;
@@ -71,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
src_pitch >>= 2;
}
counter[0] = drm_vblank_count(dev, 0);
counter[1] = drm_vblank_count(dev, 1);
DRM_DEBUG("\n");
INIT_LIST_HEAD(&hits);
@@ -83,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
drm_i915_vbl_swap_t *vbl_swap =
list_entry(list, drm_i915_vbl_swap_t, head);
int pipe = i915_get_pipe(dev, vbl_swap->plane);
if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
list_del(list);
dev_priv->swaps_pending--;
drm_vblank_put(dev, pipe);
spin_unlock(&dev_priv->swaps_lock);
spin_lock(&dev->drw_lock);
@@ -181,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
drm_i915_vbl_swap_t *swap_hit =
list_entry(hit, drm_i915_vbl_swap_t, head);
struct drm_clip_rect *rect;
int num_rects, pipe;
int num_rects, plane;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -190,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
continue;
rect = drw->rects;
pipe = swap_hit->pipe;
top = upper[pipe];
bottom = lower[pipe];
plane = swap_hit->plane;
top = upper[plane];
bottom = lower[plane];
for (num_rects = drw->num_rects; num_rects--; rect++) {
int y1 = max(rect->y1, top);
@@ -229,61 +313,139 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
}
u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low, count;
int pipe;
pipe = i915_get_pipe(dev, plane);
high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
if (!i915_pipe_enabled(dev, pipe)) {
DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
return 0;
}
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
PIPE_FRAME_HIGH_SHIFT);
low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
PIPE_FRAME_LOW_SHIFT);
high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
PIPE_FRAME_HIGH_SHIFT);
} while (high1 != high2);
count = (high1 << 8) | low;
return count;
}
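Because the hardware frame counter is only 24 bits wide (matching the max_vblank_count assignment in the postinstall hook below), callers compare sequence numbers wrap-safely. The (1<<23) test in the tasklet above is one instance of this pattern; the helper below is a hypothetical restatement, not code from the patch:

/* Wrap-safe test: a difference of more than half the sequence space
 * means `seq` lies in the future rather than the recent past. */
static inline int i915_seq_passed(u32 now, u32 seq)
{
	return (now - seq) <= (1 << 23);
}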
void
i915_gem_vblank_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
dev_priv = container_of(work, drm_i915_private_t,
mm.vblank_work);
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
i915_vblank_tasklet(dev);
mutex_unlock(&dev->struct_mutex);
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
u32 iir;
u32 pipea_stats, pipeb_stats;
int vblank = 0;
pipea_stats = I915_READ(I915REG_PIPEASTAT);
pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
atomic_inc(&dev_priv->irq_received);
temp = I915_READ16(I915REG_INT_IDENTITY_R);
if (dev->pdev->msi_enabled)
I915_WRITE(IMR, ~0);
iir = I915_READ(IIR);
temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
if (temp == 0)
if (iir == 0) {
if (dev->pdev->msi_enabled) {
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
return IRQ_NONE;
}
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
(void) I915_READ16(I915REG_INT_IDENTITY_R);
DRM_READMEMORYBARRIER();
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
* we may get extra interrupts.
*/
if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
pipea_stats = I915_READ(PIPEASTAT);
if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
PIPE_VBLANK_INTERRUPT_ENABLE);
else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 0));
}
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
I915_WRITE(PIPEASTAT, pipea_stats);
}
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
pipeb_stats = I915_READ(PIPEBSTAT);
/* Ack the event */
I915_WRITE(PIPEBSTAT, pipeb_stats);
if (temp & USER_INT_FLAG)
/* The vblank interrupt gets enabled even if we didn't ask for
* it, so make sure it's shut down again */
if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
PIPE_VBLANK_INTERRUPT_ENABLE);
else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
PIPE_VBLANK_INTERRUPT_STATUS)) {
vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 1));
}
if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
opregion_asle_intr(dev);
I915_WRITE(PIPEBSTAT, pipeb_stats);
}
I915_WRITE(IIR, iir);
if (dev->pdev->msi_enabled)
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IIR); /* Flush posted writes */
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
if (iir & I915_USER_INTERRUPT) {
dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
DRM_WAKEUP(&dev_priv->irq_queue);
}
if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
int vblank_pipe = dev_priv->vblank_pipe;
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
if ((vblank_pipe &
(DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
== (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
if (temp & VSYNC_PIPEA_FLAG)
atomic_inc(&dev->vbl_received);
if (temp & VSYNC_PIPEB_FLAG)
atomic_inc(&dev->vbl_received2);
} else if (((temp & VSYNC_PIPEA_FLAG) &&
(vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
((temp & VSYNC_PIPEB_FLAG) &&
(vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
if (dev_priv->swaps_pending > 0)
if (vblank && dev_priv->swaps_pending > 0) {
if (dev_priv->ring.ring_obj == NULL)
drm_locked_tasklet(dev, i915_vblank_tasklet);
I915_WRITE(I915REG_PIPEASTAT,
pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
I915_VBLANK_CLEAR);
I915_WRITE(I915REG_PIPEBSTAT,
pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
I915_VBLANK_CLEAR);
else
schedule_work(&dev_priv->mm.vblank_work);
}
return IRQ_HANDLED;
@@ -298,23 +460,45 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG("\n");
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
dev_priv->counter = 1;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(6);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
return dev_priv->counter;
}
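The MI_STORE_DWORD_INDEX above parks the counter in dword 5 of the hardware status page, the same slot i915_dma.c reads back as hw_status[5]. The consumer side, reconstructed from that usage rather than quoted from i915_drv.h, looks roughly like:

/* Illustrative: READ_BREADCRUMB() polls status-page dword 5, where the
 * ring command emitted above stores dev_priv->counter. */
#define READ_BREADCRUMB(dev_priv) \
	(((volatile u32 *)(dev_priv)->hw_status_page)[5])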
void i915_user_irq_get(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
spin_lock(&dev_priv->user_irq_lock);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
spin_unlock(&dev_priv->user_irq_lock);
}
void i915_user_irq_put(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
spin_lock(&dev_priv->user_irq_lock);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
spin_unlock(&dev_priv->user_irq_lock);
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -323,55 +507,34 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr)
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
if (dev_priv->sarea_priv) {
dev_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
}
return 0;
}
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
i915_user_irq_get(dev);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
i915_user_irq_put(dev);
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return ret;
}
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
atomic_t *counter)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1<<23)));
*sequence = cur_vblank;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
return ret;
}
int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
/* Needs the lock as it touches the ring.
*/
int i915_irq_emit(struct drm_device *dev, void *data,
@@ -381,14 +544,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
result = i915_emit_irq(dev);
mutex_unlock(&dev->struct_mutex);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
@@ -414,18 +578,74 @@ int i915_irq_wait(struct drm_device *dev, void *data,
return i915_wait_irq(dev, irqwait->irq_seq);
}
static void i915_enable_interrupt (struct drm_device *dev)
int i915_enable_vblank(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 flag;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 pipestat;
flag = 0;
if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
flag |= VSYNC_PIPEA_FLAG;
if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
flag |= VSYNC_PIPEB_FLAG;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
break;
case 1:
pipestat_reg = PIPEBSTAT;
i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
pipe);
break;
}
I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
if (pipestat_reg) {
pipestat = I915_READ(pipestat_reg);
if (IS_I965G(dev))
pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
else
pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
/* Clear any stale interrupt status */
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
return 0;
}
void i915_disable_vblank(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
break;
case 1:
pipestat_reg = PIPEBSTAT;
i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
pipe);
break;
}
if (pipestat_reg) {
pipestat = I915_READ(pipestat_reg);
pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
PIPE_VBLANK_INTERRUPT_ENABLE);
/* Clear any stale interrupt status */
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
}
/* Set the vblank monitor pipe
@@ -434,22 +654,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
return -EINVAL;
}
dev_priv->vblank_pipe = pipe->pipe;
i915_enable_interrupt (dev);
return 0;
}
@@ -458,19 +668,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
u16 flag;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
flag = I915_READ(I915REG_INT_ENABLE_R);
pipe->pipe = 0;
if (flag & VSYNC_PIPEA_FLAG)
pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
if (flag & VSYNC_PIPEB_FLAG)
pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
return 0;
}
@@ -484,11 +688,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_swap_t *swap = data;
drm_i915_vbl_swap_t *vbl_swap;
unsigned int pipe, seqtype, curseq;
unsigned int pipe, seqtype, curseq, plane;
unsigned long irqflags;
struct list_head *list;
int ret;
if (!dev_priv) {
if (!dev_priv || !dev_priv->sarea_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
return -EINVAL;
}
@ -504,7 +709,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
pipe = i915_get_pipe(dev, plane);
seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
@ -523,7 +729,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
/*
* We take the ref here and put it when the swap actually completes
* in the tasklet.
*/
ret = drm_vblank_get(dev, pipe);
if (ret)
return ret;
curseq = drm_vblank_count(dev, pipe);
if (seqtype == _DRM_VBLANK_RELATIVE)
swap->sequence += curseq;
@ -533,6 +746,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
swap->sequence = curseq + 1;
} else {
DRM_DEBUG("Missed target sequence\n");
drm_vblank_put(dev, pipe);
return -EINVAL;
}
}
@ -543,7 +757,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
if (vbl_swap->drw_id == swap->drawable &&
vbl_swap->pipe == pipe &&
vbl_swap->plane == plane &&
vbl_swap->sequence == swap->sequence) {
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
DRM_DEBUG("Already scheduled\n");
@ -555,6 +769,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
if (dev_priv->swaps_pending >= 100) {
DRM_DEBUG("Too many swaps queued\n");
drm_vblank_put(dev, pipe);
return -EBUSY;
}
@ -562,13 +777,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
drm_vblank_put(dev, pipe);
return -ENOMEM;
}
DRM_DEBUG("\n");
vbl_swap->drw_id = swap->drawable;
vbl_swap->pipe = pipe;
vbl_swap->plane = plane;
vbl_swap->sequence = swap->sequence;
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
@ -587,37 +803,63 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
I915_WRITE16(I915REG_HWSTAM, 0xfffe);
I915_WRITE16(I915REG_INT_MASK_R, 0x0);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
I915_WRITE(HWSTAM, 0xeffe);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
}
void i915_driver_irq_postinstall(struct drm_device * dev)
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret, num_pipes = 2;
spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
if (!dev_priv->vblank_pipe)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
i915_enable_interrupt(dev);
/* Set initial unmasked IRQs to just the selected vblank pipes. */
dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
return ret;
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
I915_WRITE(IMR, dev_priv->irq_mask_reg);
I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
(void) I915_READ(IER);
opregion_enable_asle(dev);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
return 0;
}
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
u32 temp;
if (!dev_priv)
return;
I915_WRITE16(I915REG_HWSTAM, 0xffff);
I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
dev_priv->vblank_pipe = 0;
temp = I915_READ16(I915REG_INT_IDENTITY_R);
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
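/* Ack anything still latched in the pipe status and IIR registers so
 * a later re-install starts from a clean slate.
 */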
temp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, temp);
temp = I915_READ(PIPEBSTAT);
I915_WRITE(PIPEBSTAT, temp);
temp = I915_READ(IIR);
I915_WRITE(IIR, temp);
}

View file

@ -0,0 +1,371 @@
/*
* Copyright 2008 Intel Corporation <hong.liu@intel.com>
* Copyright 2008 Red Hat <mjg@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/acpi.h>
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#define PCI_ASLE 0xe4
#define PCI_LBPC 0xf4
#define PCI_ASLS 0xfc
#define OPREGION_SZ (8*1024)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x1000
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
struct opregion_header {
u8 signature[16];
u32 size;
u32 opregion_ver;
u8 bios_ver[32];
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
} __attribute__((packed));
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
u32 drdy; /* driver readiness */
u32 csts; /* notification status */
u32 cevt; /* current event */
u8 rsvd1[20];
u32 didl[8]; /* supported display devices ID list */
u32 cpdl[8]; /* currently presented display list */
u32 cadl[8]; /* currently active display list */
u32 nadl[8]; /* next active devices list */
u32 aslp; /* ASL sleep time-out */
u32 tidx; /* toggle table index */
u32 chpd; /* current hotplug enable indicator */
u32 clid; /* current lid state*/
u32 cdck; /* current docking state */
u32 sxsw; /* Sx state resume */
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
} __attribute__((packed));
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
u32 scic; /* SWSCI command|status|data */
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
} __attribute__((packed));
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
u32 ardy; /* driver readiness */
u32 aslc; /* ASLE interrupt command */
u32 tche; /* technology enabled indicator */
u32 alsi; /* current ALS illuminance reading */
u32 bclp; /* backlight brightness to set */
u32 pfit; /* panel fitting state */
u32 cblv; /* current brightness level */
u16 bclm[20]; /* backlight level duty cycle mapping table */
u32 cpfm; /* current panel fitting mode */
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
u8 rsvd[102];
} __attribute__((packed));
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
#define ASLE_SET_PFIT (1 << 2)
#define ASLE_SET_PWM_FREQ (1 << 3)
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAIL (2<<10)
#define ASLE_BACKLIGHT_FAIL (2<<12)
#define ASLE_PFIT_FAIL (2<<14)
#define ASLE_PWM_FREQ_FAIL (2<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
/* ASLE panel fitting request */
#define ASLE_PFIT_VALID (1<<31)
#define ASLE_PFIT_CENTER (1<<0)
#define ASLE_PFIT_STRETCH_TEXT (1<<1)
#define ASLE_PFIT_STRETCH_GFX (1<<2)
/* PWM frequency and minimum brightness */
#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
#define ASLE_PFMB_PWM_VALID (1<<31)
#define ASLE_CBLV_VALID (1<<31)
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 blc_pwm_ctl, blc_pwm_ctl2;
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAIL;
bclp &= ASLE_BCLP_MSK;
if (bclp > 255) /* bclp is u32, so only the upper bound can trip */
return ASLE_BACKLIGHT_FAIL;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
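/* In combination mode the firmware expects brightness updates through
 * the legacy LBPC config register; otherwise program the PWM duty
 * cycle directly.
 */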
if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
else
I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
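/* Report the new level back as a percentage (0x64 == 100) with the
 * valid bit set, so the firmware can see the request completed.
 */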
asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
return 0;
}
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
return 0;
}
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pfmb & ASLE_PFMB_PWM_VALID) {
u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
pwm = pwm >> 9;
/* FIXME - what do we do with the PWM? */
}
return 0;
}
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
if (!(pfit & ASLE_PFIT_VALID))
return ASLE_PFIT_FAIL;
return 0;
}
void opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
u32 asle_stat = 0;
u32 asle_req;
if (!asle)
return;
asle_req = asle->aslc & ASLE_REQ_MSK;
if (!asle_req) {
DRM_DEBUG("non asle set request??\n");
return;
}
if (asle_req & ASLE_SET_ALS_ILLUM)
asle_stat |= asle_set_als_illum(dev, asle->alsi);
if (asle_req & ASLE_SET_BACKLIGHT)
asle_stat |= asle_set_backlight(dev, asle->bclp);
if (asle_req & ASLE_SET_PFIT)
asle_stat |= asle_set_pfit(dev, asle->pfit);
if (asle_req & ASLE_SET_PWM_FREQ)
asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
asle->aslc = asle_stat;
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
void opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
/* Many devices trigger events with a write to the
legacy backlight controller, so we need to ensure
that it's able to generate interrupts */
I915_WRITE(PIPEBSTAT, pipeb_stats |=
I915_LEGACY_BLC_EVENT_ENABLE);
i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
} else
i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
asle->ardy = 1;
}
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
static struct intel_opregion *system_opregion;
int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
void *data)
{
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
We might want to make the video driver opregion-aware in the
future, but right now we just indicate to the firmware that the
request has been handled */
struct opregion_acpi *acpi;
if (!system_opregion)
return NOTIFY_DONE;
acpi = system_opregion->acpi;
acpi->csts = 0;
return NOTIFY_OK;
}
static struct notifier_block intel_opregion_notifier = {
.notifier_call = intel_opregion_video_event,
};
int intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void *base;
u32 asls, mboxes;
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
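/* ASLS holds the physical address of the OpRegion published by the
 * BIOS; map the whole 8KB region so the mailboxes are reachable.
 */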
base = ioremap(asls, OPREGION_SZ);
if (!base)
return -ENOMEM;
opregion->header = base;
if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
} else {
DRM_DEBUG("Public ACPI methods not supported\n");
err = -ENOTSUPP;
goto err_out;
}
opregion->enabled = 1;
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
}
if (mboxes & MBOX_ASLE) {
DRM_DEBUG("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
}
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
opregion->acpi->csts = 0;
opregion->acpi->drdy = 1;
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
return 0;
err_out:
iounmap(opregion->header);
opregion->header = NULL;
return err;
}
void intel_opregion_free(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->enabled)
return;
opregion->acpi->drdy = 0;
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
/* just clear all opregion memory pointers now */
iounmap(opregion->header);
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->enabled = 0;
}

File diff suppressed because it is too large

View file

@ -0,0 +1,509 @@
/*
*
* Copyright 2008 (c) Intel Corporation
* Jesse Barnes <jbarnes@virtuousgeek.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pipe == PIPE_A)
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
else
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
return I915_READ8(data_port);
}
static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
return I915_READ8(VGA_AR_DATA_READ);
}
static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
I915_WRITE8(VGA_AR_DATA_WRITE, val);
}
static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
I915_WRITE8(data_port, val);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
/* DACCRX automatically increments during read */
I915_WRITE8(VGA_DACRX, 0);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
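/* Clear bit 7 of CR11 first: it write-protects CR0-7, and the saved
 * value should leave those registers writable for the restore path.
 */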
i915_write_indexed(dev, cr_index, cr_data, 0x11,
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
/* DACWX automatically increments during write */
I915_WRITE8(VGA_DACWX, 0);
/* Write 3 bytes of color data to each index */
for (i = 0; i < 256 * 3; i++)
I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
}
int i915_save_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
if (IS_I965G(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
if (IS_I965G(dev)) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
if (IS_I965G(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
if (IS_I965G(dev)) { /* match the restore path, which checks IS_I965G */
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
/* LVDS state */
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (IS_I965G(dev))
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
if (!IS_I830(dev) && !IS_845G(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
/* FIXME: save TV & SDVO state */
/* FBC state */
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Interrupt state */
dev_priv->saveIIR = I915_READ(IIR);
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
/* VGA state */
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
i915_save_vga(dev);
return 0;
}
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
DRM_UDELAY(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
DRM_UDELAY(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
DRM_UDELAY(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
DRM_UDELAY(150);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS);
/* Cache mode state */
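/* CACHE_MODE_0 and MI_ARB_STATE are masked registers: the high 16
 * bits select which low bits a write actually updates, so set them
 * all to restore the full register.
 */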
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_vga(dev);
return 0;
}

View file

@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
.dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
.vblank_wait = mga_driver_vblank_wait,
.get_vblank_counter = mga_get_vblank_counter,
.enable_vblank = mga_enable_vblank,
.disable_vblank = mga_disable_vblank,
.irq_preinstall = mga_driver_irq_preinstall,
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
@ -64,20 +65,20 @@ static struct drm_driver driver = {
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = mga_compat_ioctl,
.compat_ioctl = mga_compat_ioctl,
#endif
},
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,

View file

@ -120,6 +120,7 @@ typedef struct drm_mga_private {
u32 clear_cmd;
u32 maccess;
atomic_t vbl_received; /**< Number of vblanks received. */
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(struct drm_device * dev);
extern void mga_driver_irq_postinstall(struct drm_device * dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);

View file

@ -1,5 +1,6 @@
/* mga_irq.c -- IRQ handling for MGA -*- linux-c -*-
*
*/
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
@ -35,6 +36,18 @@
#include "mga_drm.h"
#include "mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@ -47,9 +60,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & MGA_VLINEPEN) {
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
handled = 1;
}
@ -58,6 +70,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
/* In addition to clearing the interrupt-pending bit, we
@ -72,28 +85,39 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
if (handled) {
if (handled)
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
int mga_enable_vblank(struct drm_device *dev, int crtc)
{
unsigned int cur_vblank;
int ret = 0;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
if (crtc != 0) {
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return 0;
}
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
return 0;
}
void mga_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
}
/* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
* a nice hardware counter that tracks the number of refreshes when
* the interrupt is disabled, and the kernel doesn't know the refresh
* rate to calculate an estimate.
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@ -125,14 +149,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
MGA_WRITE(MGA_ICLEAR, ~0);
}
void mga_driver_irq_postinstall(struct drm_device * dev)
int mga_driver_irq_postinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int ret;
ret = drm_vblank_init(dev, 1);
if (ret)
return ret;
DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
/* Turn on vertical blank interrupt and soft trap interrupt. */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
*/
MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
return 0;
}
void mga_driver_irq_uninstall(struct drm_device * dev)

View file

@ -1022,7 +1022,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
switch (param->param) {
case MGA_PARAM_IRQ_NR:
value = dev->irq;
value = drm_dev_to_irq(dev);
break;
case MGA_PARAM_CARD_TYPE:
value = dev_priv->chipset;

View file

@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
.vblank_wait = r128_driver_vblank_wait,
.get_vblank_counter = r128_get_vblank_counter,
.enable_vblank = r128_enable_vblank,
.disable_vblank = r128_disable_vblank,
.irq_preinstall = r128_driver_irq_preinstall,
.irq_postinstall = r128_driver_irq_postinstall,
.irq_uninstall = r128_driver_irq_uninstall,
@ -59,21 +60,20 @@ static struct drm_driver driver = {
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = r128_compat_ioctl,
.compat_ioctl = r128_compat_ioctl,
#endif
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
@ -87,6 +87,7 @@ static struct drm_driver driver = {
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
return drm_init(&driver);
}

View file

@ -29,7 +29,7 @@
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Michel Dänzer <daenzerm@student.ethz.ch>
* Michel Dänzer <daenzerm@student.ethz.ch>
*/
#ifndef __R128_DRV_H__
@ -97,6 +97,8 @@ typedef struct drm_r128_private {
u32 crtc_offset;
u32 crtc_offset_cntl;
atomic_t vbl_received;
u32 color_fmt;
unsigned int front_offset;
unsigned int front_pitch;
@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(struct drm_device * dev);
extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(struct drm_device * dev);
extern void r128_driver_irq_postinstall(struct drm_device * dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
extern void r128_driver_preclose(struct drm_device * dev,

View file

@ -35,6 +35,16 @@
#include "r128_drm.h"
#include "r128_drv.h"
u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_r128_private_t *dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & R128_CRTC_VBLANK_INT) {
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
int r128_enable_vblank(struct drm_device *dev, int crtc)
{
unsigned int cur_vblank;
int ret = 0;
drm_r128_private_t *dev_priv = dev->dev_private;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
return -EINVAL;
}
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
return 0;
}
void r128_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
/*
* FIXME: implement proper interrupt disable by using the vblank
* counter register (if available)
*
* R128_WRITE(R128_GEN_INT_CNTL,
* R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
}
void r128_driver_irq_preinstall(struct drm_device * dev)
@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
void r128_driver_irq_postinstall(struct drm_device * dev)
int r128_driver_irq_postinstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Turn on VBL interrupt */
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
return drm_vblank_init(dev, 1);
}
void r128_driver_irq_uninstall(struct drm_device * dev)

View file

@ -1629,7 +1629,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
switch (param->param) {
case R128_PARAM_IRQ_NR:
value = dev->irq;
value = drm_dev_to_irq(dev);
break;
default:
return -EINVAL;

View file

@ -71,7 +71,8 @@ static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
{
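/* RS740 shares the RS690 MC indirect register interface, so route
 * it the same way.
 */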
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
return RS690_READ_MCIND(dev_priv, addr);
else
return RS480_READ_MCIND(dev_priv, addr);
@ -82,7 +83,8 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
@ -94,7 +96,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
@ -106,7 +109,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
{
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
@ -122,15 +126,17 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
RADEON_WRITE(RS480_AGP_BASE_2, 0);
RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
} else {
RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
@ -347,6 +353,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
DRM_INFO("Loading R300 Microcode\n");
for (i = 0; i < 256; i++) {
@ -356,6 +363,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
R300_cp_microcode[i][0]);
}
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
DRM_INFO("Loading R400 Microcode\n");
for (i = 0; i < 256; i++) {
@ -364,8 +372,9 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
R420_cp_microcode[i][0]);
}
} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
DRM_INFO("Loading RS690 Microcode\n");
} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
DRM_INFO("Loading RS690/RS740 Microcode\n");
for (i = 0; i < 256; i++) {
RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
RS690_cp_microcode[i][1]);
@ -626,8 +635,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
dev_priv->ring.size_l2qw);
#endif
/* Start with assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
/* Initialize the scratch register pointer. This will cause
* the scratch register values to be written out to memory
@ -646,8 +653,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
/* Turn on bus mastering */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
/* rs400, rs690/rs740 */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
} else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) {
/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
RADEON_WRITE(RADEON_BUS_CNTL, tmp);
} /* PCIE cards appear to not need this */
dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
@ -674,6 +691,9 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
{
u32 tmp;
/* Start with assuming that writeback doesn't work */
dev_priv->writeback_works = 0;
/* Writeback doesn't seem to work everywhere, test it here and possibly
* enable it if it appears to work
*/
@ -719,7 +739,8 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
dev_priv->gart_size);
temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
RS690_BLOCK_GFX_D3_EN));
else
@ -812,6 +833,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
u32 tmp;
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
(dev_priv->flags & RADEON_IS_IGPGART)) {
radeon_set_igpgart(dev_priv, on);
return;
@ -1286,7 +1308,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
radeon_cp_init_ring_buffer(dev, dev_priv);
radeon_do_engine_reset(dev);
radeon_enable_interrupt(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
DRM_DEBUG("radeon_do_resume_cp() complete\n");
@ -1708,6 +1730,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
case CHIP_R300:
case CHIP_R350:
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
case CHIP_RV515:
case CHIP_R520:

View file

@ -52,6 +52,28 @@ static int dri_library_name(struct drm_device *dev, char *buf)
"r300"));
}
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
return 0;
}
static int radeon_resume(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
/* Restore interrupt registers */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
return 0;
}
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
@ -59,8 +81,7 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@ -69,8 +90,11 @@ static struct drm_driver driver = {
.postclose = radeon_driver_postclose,
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
.vblank_wait = radeon_driver_vblank_wait,
.vblank_wait2 = radeon_driver_vblank_wait2,
.suspend = radeon_suspend,
.resume = radeon_resume,
.get_vblank_counter = radeon_get_vblank_counter,
.enable_vblank = radeon_enable_vblank,
.disable_vblank = radeon_disable_vblank,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,

View file

@ -122,9 +122,12 @@ enum radeon_family {
CHIP_RV350,
CHIP_RV380,
CHIP_R420,
CHIP_R423,
CHIP_RV410,
CHIP_RS400,
CHIP_RS480,
CHIP_RS690,
CHIP_RS740,
CHIP_RV515,
CHIP_R520,
CHIP_RV530,
@ -378,17 +381,17 @@ extern void radeon_mem_release(struct drm_file *file_priv,
struct mem_block *heap);
/* radeon_irq.c */
extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_do_release(struct drm_device * dev);
extern int radeon_driver_vblank_wait(struct drm_device * dev,
unsigned int *sequence);
extern int radeon_driver_vblank_wait2(struct drm_device * dev,
unsigned int *sequence);
extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(struct drm_device * dev);
extern void radeon_driver_irq_postinstall(struct drm_device * dev);
extern int radeon_driver_irq_postinstall(struct drm_device *dev);
extern void radeon_driver_irq_uninstall(struct drm_device * dev);
extern void radeon_enable_interrupt(struct drm_device *dev);
extern int radeon_vblank_crtc_get(struct drm_device *dev);
@ -397,19 +400,22 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
extern int radeon_driver_firstopen(struct drm_device *dev);
extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
extern void radeon_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
extern void radeon_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern void radeon_driver_lastclose(struct drm_device * dev);
extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
extern int radeon_driver_open(struct drm_device *dev,
struct drm_file *file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(struct drm_device *dev);
extern int r300_do_cp_cmdbuf(struct drm_device * dev,
extern int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t * cmdbuf);
drm_radeon_kcmd_buffer_t *cmdbuf);
/* Flags for stats.boxes
*/
@ -434,8 +440,31 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_SCISSOR_1_ENABLE (1 << 29)
# define RADEON_SCISSOR_2_ENABLE (1 << 30)
/*
* PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
* don't have an explicit bus mastering disable bit. It's handled
* by the PCI D-states. PMI_BM_DIS disables D-state bus master
* handling, not bus mastering itself.
*/
#define RADEON_BUS_CNTL 0x0030
/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
# define RADEON_BUS_MASTER_DIS (1 << 6)
/* rs400, rs690/rs740 */
# define RS400_BUS_MASTER_DIS (1 << 14)
# define RS400_MSI_REARM (1 << 20)
/* see RS480_MSI_REARM in AIC_CNTL for rs480 */
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_PMI_BM_DIS (1 << 2)
# define RADEON_PMI_INT_DIS (1 << 3)
#define RV370_BUS_CNTL 0x004c
# define RV370_PMI_BM_DIS (1 << 5)
# define RV370_PMI_INT_DIS (1 << 6)
#define RADEON_MSI_REARM_EN 0x0160
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
# define RV370_MSI_REARM_EN (1 << 0)
#define RADEON_CLOCK_CNTL_DATA 0x000c
# define RADEON_PLL_WR_EN (1 << 7)
@ -623,6 +652,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_SW_INT_TEST (1 << 25)
# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_FIRE (1 << 26)
# define R500_DISPLAY_INT_STATUS (1 << 0)
#define RADEON_HOST_PATH_CNTL 0x0130
# define RADEON_HDP_SOFT_RESET (1 << 26)
@ -907,6 +937,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_AIC_CNTL 0x01d0
# define RADEON_PCIGART_TRANSLATE_EN (1 << 0)
# define RS480_MSI_REARM (1 << 3)
#define RADEON_AIC_STAT 0x01d4
#define RADEON_AIC_PT_BASE 0x01d8
#define RADEON_AIC_LO_ADDR 0x01dc
@ -1116,6 +1147,9 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define R200_VAP_PVS_CNTL_1 0x22D0
#define RADEON_CRTC_CRNT_FRAME 0x0214
#define RADEON_CRTC2_CRNT_FRAME 0x0314
#define R500_D1CRTC_STATUS 0x609c
#define R500_D2CRTC_STATUS 0x689c
#define R500_CRTC_V_BLANK (1<<0)
@ -1200,7 +1234,8 @@ do { \
#define IGP_WRITE_MCIND(addr, val) \
do { \
if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \
if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || \
((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) \
RS690_WRITE_MCIND(addr, val); \
else \
RS480_WRITE_MCIND(addr, val); \

View file

@ -27,7 +27,7 @@
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Michel Dänzer <michel@daenzer.net>
* Michel Dänzer <michel@daenzer.net>
*/
#include "drmP.h"
@ -35,12 +35,128 @@
#include "radeon_drm.h"
#include "radeon_drv.h"
static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
u32 mask)
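/* Mirror the enabled-interrupt masks in the driver so the suspend and
 * resume hooks can reprogram GEN_INT_CNTL and DxMODE_INT_MASK from
 * software state.
 */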
void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
drm_radeon_private_t *dev_priv = dev->dev_private;
if (state)
dev_priv->irq_enable_reg |= mask;
else
dev_priv->irq_enable_reg &= ~mask;
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
}
static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (state)
dev_priv->r500_disp_irq_reg |= mask;
else
dev_priv->r500_disp_irq_reg &= ~mask;
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
}
int radeon_enable_vblank(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
switch (crtc) {
case 0:
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
break;
case 1:
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return -EINVAL;
}
} else {
switch (crtc) {
case 0:
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
break;
case 1:
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return -EINVAL;
}
}
return 0;
}
void radeon_disable_vblank(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
switch (crtc) {
case 0:
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
break;
case 1:
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
break;
}
} else {
switch (crtc) {
case 0:
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
break;
case 1:
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
break;
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
break;
}
}
}
static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
{
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
u32 irq_mask = RADEON_SW_INT_TEST;
*r500_disp_int = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
/* vbl interrupts in a different place */
if (irqs & R500_DISPLAY_INT_STATUS) {
/* if a display interrupt */
u32 disp_irq;
disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
*r500_disp_int = disp_irq;
if (disp_irq & R500_D1_VBLANK_INTERRUPT)
RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
if (disp_irq & R500_D2_VBLANK_INTERRUPT)
RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
}
irq_mask |= R500_DISPLAY_INT_STATUS;
} else
irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
irqs &= irq_mask;
if (irqs)
RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
return irqs;
}
@ -68,44 +184,33 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 stat;
u32 r500_disp_int;
/* Only consider the bits we're interested in - others could be used
* outside the DRM
*/
stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT |
RADEON_CRTC2_VBLANK_STAT));
stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
if (!stat)
return IRQ_NONE;
stat &= dev_priv->irq_enable_reg;
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST) {
if (stat & RADEON_SW_INT_TEST)
DRM_WAKEUP(&dev_priv->swi_queue);
}
/* VBLANK interrupt */
if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
int vblank_crtc = dev_priv->vblank_crtc;
if ((vblank_crtc &
(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
if (stat & RADEON_CRTC_VBLANK_STAT)
atomic_inc(&dev->vbl_received);
if (stat & RADEON_CRTC2_VBLANK_STAT)
atomic_inc(&dev->vbl_received2);
} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
(vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
((stat & RADEON_CRTC2_VBLANK_STAT) &&
(vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
drm_handle_vblank(dev, 0);
if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
drm_handle_vblank(dev, 1);
} else {
if (stat & RADEON_CRTC_VBLANK_STAT)
drm_handle_vblank(dev, 0);
if (stat & RADEON_CRTC2_VBLANK_STAT)
drm_handle_vblank(dev, 1);
}
return IRQ_HANDLED;
}
@ -144,54 +249,31 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
return ret;
}
static int radeon_driver_vblank_do_wait(struct drm_device * dev,
unsigned int *sequence, int crtc)
u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
int ack = 0;
atomic_t *counter;
drm_radeon_private_t *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (crtc == DRM_RADEON_VBLANK_CRTC1) {
counter = &dev->vbl_received;
ack |= RADEON_CRTC_VBLANK_STAT;
} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
counter = &dev->vbl_received2;
ack |= RADEON_CRTC2_VBLANK_STAT;
} else
if (crtc < 0 || crtc > 1) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
radeon_acknowledge_irqs(dev_priv, ack);
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1 << 23)));
*sequence = cur_vblank;
return ret;
}
int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
}
int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
if (crtc == 0)
return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
else
return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
} else {
if (crtc == 0)
return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
else
return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
}
}
/* Needs the lock as it touches the ring.
@ -234,46 +316,41 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
return radeon_wait_irq(dev, irqwait->irq_seq);
}
void radeon_enable_interrupt(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
/* drm_dma.h hooks
*/
void radeon_driver_irq_preinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 dummy;
/* Disable *all* interrupts */
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
/* Clear bits if they're already high */
radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
RADEON_CRTC_VBLANK_STAT |
RADEON_CRTC2_VBLANK_STAT));
radeon_acknowledge_irqs(dev_priv, &dummy);
}
void radeon_driver_irq_postinstall(struct drm_device * dev)
int radeon_driver_irq_postinstall(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
int ret;
atomic_set(&dev_priv->swi_emitted, 0);
DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
radeon_enable_interrupt(dev);
ret = drm_vblank_init(dev, 2);
if (ret)
return ret;
dev->max_vblank_count = 0x001fffff;
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
return 0;
}
void radeon_driver_irq_uninstall(struct drm_device * dev)
@ -285,6 +362,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
dev_priv->irq_enabled = 0;
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
/* Disable *all* interrupts */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
}
@ -293,18 +372,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
int radeon_vblank_crtc_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
u32 flag;
u32 value;
flag = RADEON_READ(RADEON_GEN_INT_CNTL);
value = 0;
if (flag & RADEON_CRTC_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC1;
if (flag & RADEON_CRTC2_VBLANK_MASK)
value |= DRM_RADEON_VBLANK_CRTC2;
return value;
return dev_priv->vblank_crtc;
}
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
@ -315,6 +384,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
return -EINVAL;
}
dev_priv->vblank_crtc = (unsigned int)value;
radeon_enable_interrupt(dev);
return 0;
}

View file

@ -2997,7 +2997,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
value = GET_SCRATCH(2);
break;
case RADEON_PARAM_IRQ_NR:
value = dev->irq;
value = drm_dev_to_irq(dev);
break;
case RADEON_PARAM_GART_BASE:
value = dev_priv->gart_vm_start;

View file

@ -41,7 +41,7 @@
#define AGP_TYPE 1
#if defined(CONFIG_FB_SIS)
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
@ -57,7 +57,7 @@ static void *sis_sman_mm_allocate(void *private, unsigned long size,
if (req.size == 0)
return NULL;
else
return (void *)~req.offset;
return (void *)(unsigned long)~req.offset;
}
static void sis_sman_mm_free(void *private, void *ref)
@ -75,12 +75,12 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
return ~((unsigned long)ref);
}
#else /* CONFIG_FB_SIS */
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
#endif /* CONFIG_FB_SIS */
#endif /* CONFIG_FB_SIS[_MODULE] */
static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@ -89,7 +89,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
int ret;
mutex_lock(&dev->struct_mutex);
#if defined(CONFIG_FB_SIS)
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
{
struct drm_sman_mm sman_mm;
sman_mm.private = (void *)0xFFFFFFFF;

View file

@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
.context_dtor = via_final_context,
.vblank_wait = via_driver_vblank_wait,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
.disable_vblank = via_disable_vblank,
.irq_preinstall = via_driver_irq_preinstall,
.irq_postinstall = via_driver_irq_postinstall,
.irq_uninstall = via_driver_irq_uninstall,
@ -59,17 +61,17 @@ static struct drm_driver driver = {
.get_reg_ofs = drm_core_get_reg_ofs,
.ioctls = via_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
},
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,

View file

@ -75,6 +75,7 @@ typedef struct drm_via_private {
struct timeval last_vblank;
int last_vblank_valid;
unsigned usec_per_vblank;
atomic_t vbl_received;
drm_via_state_t hc_state;
char pci_buf[VIA_PCI_BUF_SIZE];
const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@ -130,21 +131,24 @@ extern int via_init_context(struct drm_device * dev, int context);
extern int via_final_context(struct drm_device * dev, int context);
extern int via_do_cleanup_map(struct drm_device * dev);
extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_driver_irq_preinstall(struct drm_device * dev);
extern void via_driver_irq_postinstall(struct drm_device * dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
extern void via_driver_irq_uninstall(struct drm_device * dev);
extern int via_dma_cleanup(struct drm_device * dev);
extern void via_init_command_verifier(void);
extern int via_driver_dma_quiescent(struct drm_device * dev);
extern void via_init_futex(drm_via_private_t * dev_priv);
extern void via_cleanup_futex(drm_via_private_t * dev_priv);
extern void via_release_futex(drm_via_private_t * dev_priv, int context);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
extern void via_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void via_lastclose(struct drm_device *dev);
extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);

View file

@ -43,7 +43,7 @@
#define VIA_REG_INTERRUPT 0x200
/* VIA_REG_INTERRUPT */
#define VIA_IRQ_GLOBAL (1 << 31)
#define VIA_IRQ_GLOBAL (1 << 31)
#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
#define VIA_IRQ_VBLANK_PENDING (1 << 3)
#define VIA_IRQ_HQV0_ENABLE (1 << 11)
@ -68,16 +68,15 @@
static maskarray_t via_pro_group_a_irqs[] = {
{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
0x00000000},
0x00000000 },
{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
0x00000000},
0x00000000 },
{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a =
sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
static maskarray_t via_unichrome_irqs[] = {
@ -86,14 +85,24 @@ static maskarray_t via_unichrome_irqs[] = {
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
return (now->tv_usec >= then->tv_usec) ?
now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec);
now->tv_usec - then->tv_usec :
1000000 - (then->tv_usec - now->tv_usec);
}
u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
{
drm_via_private_t *dev_priv = dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@ -108,23 +117,22 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
status = VIA_READ(VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev->vbl_received);
if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
atomic_inc(&dev_priv->vbl_received);
if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
do_gettimeofday(&cur_vblank);
if (dev_priv->last_vblank_valid) {
dev_priv->usec_per_vblank =
time_diff(&cur_vblank,
&dev_priv->last_vblank) >> 4;
time_diff(&cur_vblank,
&dev_priv->last_vblank) >> 4;
}
dev_priv->last_vblank = cur_vblank;
dev_priv->last_vblank_valid = 1;
}
if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n",
dev_priv->usec_per_vblank);
}
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
drm_handle_vblank(dev, 0);
handled = 1;
}
@ -145,6 +153,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
/* Acknowledge interrupts */
VIA_WRITE(VIA_REG_INTERRUPT, status);
if (handled)
return IRQ_HANDLED;
else
@ -163,31 +172,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
}
}
int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
int via_enable_vblank(struct drm_device *dev, int crtc)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
drm_via_private_t *dev_priv = dev->dev_private;
u32 status;
DRM_DEBUG("\n");
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
return -EINVAL;
}
viadrv_acknowledge_irqs(dev_priv);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received)) -
*sequence) <= (1 << 23)));
return 0;
}
*sequence = cur_vblank;
return ret;
void via_disable_vblank(struct drm_device *dev, int crtc)
{
drm_via_private_t *dev_priv = dev->dev_private;
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
}
static int
@ -239,6 +251,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
return ret;
}
/*
* drm_dma.h hooks
*/
@ -292,23 +305,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
}
}
void via_driver_irq_postinstall(struct drm_device * dev)
int via_driver_irq_postinstall(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
DRM_DEBUG("\n");
if (dev_priv) {
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
DRM_DEBUG("via_driver_irq_postinstall\n");
if (!dev_priv)
return -EINVAL;
/* Some magic, oh for some data sheets ! */
drm_vblank_init(dev, 1);
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
/* Some magic, oh for some data sheets ! */
VIA_WRITE8(0x83d4, 0x11);
VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
}
return 0;
}
void via_driver_irq_uninstall(struct drm_device * dev)
@ -339,9 +354,6 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int force_sequence;
if (!dev->irq)
return -EINVAL;
if (irqwait->request.irq >= dev_priv->num_irqs) {
DRM_ERROR("Trying to wait on unknown irq %d\n",
irqwait->request.irq);
@ -352,7 +364,8 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
case VIA_IRQ_RELATIVE:
irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
irqwait->request.sequence +=
atomic_read(&cur_irq->irq_received);
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case VIA_IRQ_ABSOLUTE:
break;

View file

@ -93,8 +93,7 @@ int via_final_context(struct drm_device *dev, int context)
/* Last context, perform cleanup */
if (dev->ctx_count == 1 && dev->dev_private) {
DRM_DEBUG("Last Context\n");
if (dev->irq)
drm_irq_uninstall(dev);
drm_irq_uninstall(dev);
via_cleanup_futex(dev_priv);
via_do_cleanup_map(dev);
}

View file

@ -36,7 +36,6 @@
#ifndef _DRM_H_
#define _DRM_H_
#if defined(__linux__)
#if defined(__KERNEL__)
#endif
#include <asm/ioctl.h> /* For _IO* macros */
@ -46,22 +45,6 @@
#define DRM_IOC_WRITE _IOC_WRITE
#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
#if defined(__FreeBSD__) && defined(IN_MODULE)
/* Prevent name collision when including sys/ioccom.h */
#undef ioctl
#include <sys/ioccom.h>
#define ioctl(a,b,c) xf86ioctl(a,b,c)
#else
#include <sys/ioccom.h>
#endif /* __FreeBSD__ && xf86ioctl */
#define DRM_IOCTL_NR(n) ((n) & 0xff)
#define DRM_IOC_VOID IOC_VOID
#define DRM_IOC_READ IOC_OUT
#define DRM_IOC_WRITE IOC_IN
#define DRM_IOC_READWRITE IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif
#define DRM_MAJOR 226
#define DRM_MAX_MINOR 15
@ -471,6 +454,7 @@ struct drm_irq_busid {
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
@ -503,6 +487,19 @@ union drm_wait_vblank {
struct drm_wait_vblank_reply reply;
};
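A hedged userspace sketch (open DRM fd, includes, and error handling assumed) of how this union is driven; DRM_IOCTL_WAIT_VBLANK fills in the reply half on return:

union drm_wait_vblank vbl;

memset(&vbl, 0, sizeof(vbl));
vbl.request.type = _DRM_VBLANK_RELATIVE;	/* relative to now */
vbl.request.sequence = 1;			/* the very next vblank */
ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
/* vbl.reply.sequence holds the counter value at wakeup */

OR _DRM_VBLANK_SECONDARY into the type to target the second CRTC instead.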
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
uint32_t crtc;
uint32_t cmd;
};
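A hedged sketch (fd assumed) of the bracketing this ioctl exists for: clients flag a mode set so the core can account for the hardware counter resetting, which ties into the vblank bookkeeping added elsewhere in this merge:

struct drm_modeset_ctl ctl = {
	.crtc = 0,
	.cmd  = _DRM_PRE_MODESET,
};

ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
/* ... reprogram the CRTC ... */
ctl.cmd = _DRM_POST_MODESET;
ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);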
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
@ -573,6 +570,34 @@ struct drm_set_version {
int drm_dd_minor;
};
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
uint32_t pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
uint32_t handle;
/** Returned global name */
uint32_t name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
uint32_t name;
/** Returned handle for the object */
uint32_t handle;
/** Returned size of the object */
uint64_t size;
};
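A hedged sketch (fds, handle, and error handling assumed) of how flink and open pair up to share a GEM object across processes:

/* Process A: publish a global name for an existing handle. */
struct drm_gem_flink flink = { .handle = handle };

ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
/* flink.name can now be handed to another process */

/* Process B: turn that name back into a local handle. */
struct drm_gem_open op = { .name = flink.name };

ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);
/* op.handle and op.size are filled in on return */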
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
@ -587,6 +612,10 @@ struct drm_set_version {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)

View file

@ -104,6 +104,7 @@ struct drm_device;
#define DRIVER_DMA_QUEUE 0x200
#define DRIVER_FB_DMA 0x400
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
/***********************************************************************/
/** \name Begin the DRM... */
@ -387,6 +388,10 @@ struct drm_file {
struct drm_minor *minor;
int remove_auth_on_close;
unsigned long lock_count;
/** Mapping of mm object handles to object pointers. */
struct idr object_idr;
/** Lock for synchronization of access to object_idr. */
spinlock_t table_lock;
struct file *filp;
void *driver_priv;
};
@ -557,6 +562,56 @@ struct drm_ati_pcigart_info {
int table_size;
};
/**
* This structure defines the drm_gem_object, which is used by the
* DRM for its buffer objects.
*/
struct drm_gem_object {
/** Reference count of this object */
struct kref refcount;
/** Handle count of this object. Each handle also holds a reference */
struct kref handlecount;
/** Related drm device */
struct drm_device *dev;
/** File representing the shmem storage */
struct file *filp;
/**
* Size of the object, in bytes. Immutable over the object's
* lifetime.
*/
size_t size;
/**
* Global name for this object, starts at 1. 0 means unnamed.
* Access is covered by the object_name_lock in the related drm_device
*/
int name;
/**
* Memory domains. These monitor which caches contain read/write data
* related to the object. When transitioning from one set of domains
* to another, the driver is called to ensure that caches are suitably
* flushed and invalidated
*/
uint32_t read_domains;
uint32_t write_domain;
/**
* While validating an exec operation, the
* new read/write domain values are computed here.
* They will be transferred to the above values
* at the point that any cache flushing occurs
*/
uint32_t pending_read_domains;
uint32_t pending_write_domain;
void *driver_private;
};
/**
* DRM driver structure. This structure represents the common code for
* a family of cards. There will be one drm_device for each card present
@ -580,10 +635,53 @@ struct drm_driver {
int (*kernel_context_switch) (struct drm_device *dev, int old,
int new);
void (*kernel_context_switch_unlock) (struct drm_device *dev);
int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
int (*dri_library_name) (struct drm_device *dev, char *buf);
/**
* get_vblank_counter - get raw hardware vblank counter
* @dev: DRM device
* @crtc: counter to fetch
*
* Driver callback for fetching a raw hardware vblank counter
* for @crtc. If a device doesn't have a hardware counter, the
* driver can simply return the value of drm_vblank_count and
* make the enable_vblank() and disable_vblank() hooks into no-ops,
* leaving interrupts enabled at all times.
*
* Wraparound handling and loss of events due to modesetting is dealt
* with in the DRM core code.
*
* RETURNS
* Raw vblank counter value.
*/
u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
/**
* enable_vblank - enable vblank interrupt events
* @dev: DRM device
* @crtc: which irq to enable
*
* Enable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*
* RETURNS
* Zero on success, appropriate errno if the given @crtc's vblank
* interrupt cannot be enabled.
*/
int (*enable_vblank) (struct drm_device *dev, int crtc);
/**
* disable_vblank - disable vblank interrupt events
* @dev: DRM device
* @crtc: which irq to disable
*
* Disable vblank interrupts for @crtc. If the device doesn't have
* a hardware vblank counter, this routine should be a no-op, since
* interrupts will have to stay on to keep the count accurate.
*/
void (*disable_vblank) (struct drm_device *dev, int crtc);
/**
* Called by \c drm_device_is_agp. Typically used to determine if a
* card is really attached to AGP or not.
@ -601,7 +699,7 @@ struct drm_driver {
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
void (*irq_postinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
void (*reclaim_buffers) (struct drm_device *dev,
struct drm_file * file_priv);
@ -614,6 +712,18 @@ struct drm_driver {
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
/**
* Driver-specific constructor for drm_gem_objects, to set up
* obj->driver_private.
*
* Returns 0 on success.
*/
int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
int major;
int minor;
int patchlevel;
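The get_vblank_counter documentation above describes a fallback for hardware without a frame counter; a hedged sketch of that arrangement (the foo_ driver names are invented):

static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return drm_vblank_count(dev, crtc);	/* core-maintained count */
}

static int foo_enable_vblank(struct drm_device *dev, int crtc)
{
	return 0;	/* interrupt stays on; nothing to enable */
}

static void foo_disable_vblank(struct drm_device *dev, int crtc)
{
	/* deliberately a no-op so the count stays accurate */
}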
@ -714,7 +824,6 @@ struct drm_device {
/** \name Context support */
/*@{ */
int irq; /**< Interrupt used by board */
int irq_enabled; /**< True if irq handler is enabled */
__volatile__ long context_flag; /**< Context swapping flag */
__volatile__ long interrupt_flag; /**< Interruption handler flag */
@ -730,13 +839,28 @@ struct drm_device {
/** \name VBLANK IRQ support */
/*@{ */
wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
atomic_t vbl_received;
atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
/*
* At load time, disabling the vblank interrupt won't be allowed since
* old clients may not call the modeset ioctl and therefore misbehave.
* Once the modeset ioctl *has* been called though, we can safely
* disable them when unused.
*/
int vblank_disable_allowed;
wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
spinlock_t vbl_lock;
struct list_head vbl_sigs; /**< signal list to send on VBLANK */
struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
atomic_t vbl_signal_pending; /* number of signals pending on all crtcs */
atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
u32 *last_vblank; /* protected by dev->vbl_lock, used */
/* for wraparound handling */
int *vblank_enabled; /* so we don't call enable more than
once per disable */
int *vblank_inmodeset; /* Display driver is setting mode */
struct timer_list vblank_disable_timer;
u32 max_vblank_count; /**< size of vblank counter register */
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@ -757,6 +881,7 @@ struct drm_device {
struct pci_controller *hose;
#endif
struct drm_sg_mem *sg; /**< Scatter gather memory */
int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
@ -771,8 +896,29 @@ struct drm_device {
spinlock_t drw_lock;
struct idr drw_idr;
/*@} */
/** \name GEM information */
/*@{ */
spinlock_t object_name_lock;
struct idr object_name_idr;
atomic_t object_count;
atomic_t object_memory;
atomic_t pin_count;
atomic_t pin_memory;
atomic_t gtt_count;
atomic_t gtt_memory;
uint32_t gtt_total;
uint32_t invalidate_domains; /* domains pending invalidation */
uint32_t flush_domains; /* domains pending flush */
/*@} */
};
static inline int drm_dev_to_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@ -867,6 +1013,11 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
uint32_t type);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
/* Misc. IOCTL support (drm_ioctl.h) */
@ -929,6 +1080,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
extern int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@ -985,15 +1139,25 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
extern int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_install(struct drm_device *dev);
extern int drm_irq_uninstall(struct drm_device *dev);
extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct drm_file *filp);
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern void drm_vbl_send_signals(struct drm_device *dev);
extern void drm_locked_tasklet(struct drm_device *dev,
void(*func)(struct drm_device *));
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern void drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
/* Modesetting support */
extern int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
/* AGP/GART support (drm_agpsupport.h) */
@ -1026,6 +1190,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@ -1088,6 +1253,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_object_free(struct kref *kref);
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
size_t size);
void drm_gem_object_handle_free(struct kref *kref);
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
kref_get(&obj->refcount);
}
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
if (obj == NULL)
return;
kref_put(&obj->refcount, drm_gem_object_free);
}
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep);
static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
drm_gem_object_reference(obj);
kref_get(&obj->handlecount);
}
static inline void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
if (obj == NULL)
return;
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
kref_put(&obj->handlecount, drm_gem_object_handle_free);
drm_gem_object_unreference(obj);
}
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
int handle);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
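A hedged sketch of the reference discipline these helpers imply for an ioctl handler (the surrounding function and its args are assumed):

struct drm_gem_object *obj;

obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
	return -EBADF;	/* same convention as the flink/open ioctls */

/* ... operate on obj while the reference is held ... */

drm_gem_object_unreference(obj);
return 0;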
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);

View file

@ -84,18 +84,18 @@
{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@ -113,8 +113,10 @@
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
{0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
@ -122,16 +124,16 @@
{0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
@ -237,6 +239,10 @@
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0, 0, 0}
#define r128_PCI_IDS \

View file

@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@ -200,6 +230,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
typedef struct drm_i915_getparam {
int param;
@ -267,4 +299,305 @@ typedef struct drm_i915_hws_addr {
uint64_t addr;
} drm_i915_hws_addr_t;
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
uint64_t gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
uint64_t gtt_end;
};
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
uint64_t size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
uint32_t handle;
uint32_t pad;
};
struct drm_i915_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to read from */
uint64_t offset;
/** Length of data to read */
uint64_t size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t data_ptr;
};
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to write to */
uint64_t offset;
/** Length of data to write */
uint64_t size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t data_ptr;
};
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
/** Offset in the object to map. */
uint64_t offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
uint64_t size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t addr_ptr;
};
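A hedged userspace sketch (fd, includes, and error handling assumed) of how create, pwrite, and mmap chain together:

uint32_t data[1024];	/* payload to upload, assumed initialized */

struct drm_i915_gem_create create = { .size = sizeof(data) };
ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);	/* sets create.handle */

struct drm_i915_gem_pwrite pwrite = {
	.handle   = create.handle,
	.offset   = 0,
	.size     = sizeof(data),
	.data_ptr = (uint64_t)(uintptr_t)data,
};
ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);

struct drm_i915_gem_mmap map = {
	.handle = create.handle,
	.offset = 0,
	.size   = sizeof(data),
};
ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &map);	/* map.addr_ptr on return */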
struct drm_i915_gem_set_domain {
/** Handle for the object */
uint32_t handle;
/** New read domains */
uint32_t read_domains;
/** New write domain */
uint32_t write_domain;
};
struct drm_i915_gem_sw_finish {
/** Handle for the object */
uint32_t handle;
};
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
* It's tempting to make this an index into the mm_validate_entry
* list to refer to the buffer, but using a handle instead lets the
* driver build a relocation list for a state buffer once and reuse
* it across execs without rewriting it.
*/
uint32_t target_handle;
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
uint32_t delta;
/** Offset in the buffer the relocation entry will be written into */
uint64_t offset;
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
uint64_t presumed_offset;
/**
* Target memory domains read by this operation.
*/
uint32_t read_domains;
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
uint32_t write_domain;
};
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
uint32_t handle;
/** Number of relocations to be performed on this buffer */
uint32_t relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
uint64_t relocs_ptr;
/** Required alignment in graphics aperture */
uint64_t alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
uint64_t offset;
};
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
* performed on them.
*
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
uint64_t buffers_ptr;
uint32_t buffer_count;
/** Offset in the batchbuffer to start execution from. */
uint32_t batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
uint32_t batch_len;
uint32_t DR1;
uint32_t DR4;
uint32_t num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
uint64_t cliprects_ptr;
};
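A hedged sketch of a minimal submission built from the two structs above (handles, batch_len, and reloc_offset are assumed). Per the ordering rule in the comment, the relocation target is listed before the batch that points at it:

struct drm_i915_gem_relocation_entry reloc = {
	.target_handle   = target_handle,
	.delta           = 0,
	.offset          = reloc_offset,	/* byte offset in the batch */
	.presumed_offset = 0,
	.read_domains    = I915_GEM_DOMAIN_RENDER,
	.write_domain    = 0,
};
struct drm_i915_gem_exec_object objs[2] = {
	{ .handle = target_handle },		/* target appears first */
	{ .handle = batch_handle,
	  .relocation_count = 1,
	  .relocs_ptr = (uint64_t)(uintptr_t)&reloc },
};
struct drm_i915_gem_execbuffer execbuf = {
	.buffers_ptr        = (uint64_t)(uintptr_t)objs,
	.buffer_count       = 2,
	.batch_start_offset = 0,
	.batch_len          = batch_len,
};
ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);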
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
uint32_t handle;
uint32_t pad;
/** alignment required within the aperture */
uint64_t alignment;
/** Returned GTT offset of the buffer. */
uint64_t offset;
};
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
uint32_t handle;
uint32_t pad;
};
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
uint32_t handle;
/** Return busy status (1 if busy, 0 if idle) */
uint32_t busy;
};
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
uint32_t handle;
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
uint32_t tiling_mode;
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
uint32_t stride;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
uint32_t swizzle_mode;
};
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
uint32_t handle;
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
uint32_t tiling_mode;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
uint32_t swizzle_mode;
};
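A hedged sketch (handle and stride assumed) of requesting a tiling mode and honoring what the kernel actually granted, since it may demote to linear:

struct drm_i915_gem_set_tiling st = {
	.handle      = handle,
	.tiling_mode = I915_TILING_X,
	.stride      = 4096,
};

ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
if (st.tiling_mode == I915_TILING_NONE) {
	/* demoted: treat the buffer as linear */
}
/* st.swizzle_mode tells CPU mappings how address bit 6 is swizzled */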
#endif /* _I915_DRM_H_ */

View file

@ -2580,6 +2580,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
shmem_unacct_size(flags, size);
return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
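This export is what lets a modular DRM allocate swappable, shmem-backed storage for GEM objects; a hedged sketch of the expected call site (exact name string and flags are assumptions):

/* In drm_gem_object_alloc(): back the object with an unlinked
 * shmem file so its pages can be swapped out under pressure. */
obj->filp = shmem_file_setup("drm mm object", size, 0);
if (IS_ERR(obj->filp))
	return NULL;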
/**
* shmem_zero_setup - setup a shared anonymous mapping