drm/i915: Fix a bunch of kerneldoc warnings

Just a bunch of stale kerneldoc comments generating warnings when
building the docs. Mostly mismatched or missing function parameter
descriptions, so not hugely useful fixes, but still worth doing.
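
For illustration, the typical shape of the fix, taken from the
i915_cmd_parser_init_ring() hunk below: the documented parameter is
renamed to match the function's actual signature (the prototype line
is shown here only for context).

    /* Before: stale parameter name, the kernel-doc build warns about it */
    /**
     * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
     * @ring: the ringbuffer to initialize
     */

    /* After: the documented name matches the engine parameter */
    /**
     * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
     * @engine: the engine to initialize
     */
    int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);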

v2: Tidy.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1464958937-23344-1-git-send-email-tvrtko.ursulin@linux.intel.com
Tvrtko Ursulin 2016-06-03 14:02:17 +01:00
parent f90e8c36c8
commit 14bb2c1179
6 changed files with 67 additions and 26 deletions

View File

@@ -737,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
/**
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
* @ring: the ringbuffer to initialize
* @engine: the engine to initialize
*
* Optionally initializes fields related to batch buffer command parsing in the
* struct intel_engine_cs based on whether the platform requires software
@@ -830,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
/**
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
* @ring: the ringbuffer to clean up
* @engine: the engine to clean up
*
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
@@ -1024,7 +1024,7 @@ unpin_src:
/**
* i915_needs_cmd_parser() - should a given ring use software command parsing?
* @ring: the ring in question
* @engine: the engine in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module parameter.
@@ -1176,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
/**
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
* @ring: the ring on which the batch is to execute
* @engine: the engine on which the batch is to execute
* @batch_obj: the batch buffer in question
* @shadow_batch_obj: copy of the batch buffer in question
* @batch_start_offset: byte offset in the batch at which execution starts
@@ -1281,6 +1281,7 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
/**
* i915_cmd_parser_get_version() - get the cmd parser version number
* @dev_priv: i915 device private
*
* The cmd parser maintains a simple increasing integer version number suitable
* for passing to userspace clients to determine what operations are permitted.

View File

@@ -409,6 +409,9 @@ i915_gem_dumb_create(struct drm_file *file,
/**
* Creates a new mm object and returns a handle to it.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -672,6 +675,9 @@ out:
/**
* Reads data from the object referenced by handle.
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*
* On error, the contents of *data are undefined.
*/
@@ -753,6 +759,10 @@ fast_user_write(struct io_mapping *mapping,
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
* @dev: drm device pointer
* @obj: i915 gem object
* @args: pwrite arguments structure
* @file: drm file pointer
*/
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
@@ -1016,6 +1026,9 @@ out:
/**
* Writes data to the object referenced by handle.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
@@ -1213,6 +1226,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* @req: duh!
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
* @rps: RPS client
*
* Note: It is of utmost importance that the passed in seqno and reset_counter
* values have been read by the caller in an smp safe manner. Where read-side
@@ -1446,6 +1460,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
/**
* Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
* @req: request to wait on
*/
int
i915_wait_request(struct drm_i915_gem_request *req)
@@ -1472,6 +1487,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
* @obj: i915 gem object
* @readonly: waiting for read access or write
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -1589,6 +1606,9 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1652,6 +1672,9 @@ unlock:
/**
* Called when user space has done writes to this buffer
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1682,8 +1705,11 @@ unlock:
}
/**
* Maps the contents of an object, returning the address it is mapped
* into.
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
* it is mapped to.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
@@ -2001,7 +2027,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
/**
* i915_gem_get_gtt_alignment - return required GTT alignment for an object
* @obj: object to check
* @dev: drm device
* @size: object size
* @tiling_mode: tiling mode
* @fenced: is fenced alignment required or not
*
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
@@ -2951,6 +2980,7 @@ void i915_gem_reset(struct drm_device *dev)
/**
* This function clears the request list as sequence numbers are passed.
* @engine: engine to retire requests on
*/
void
i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -3074,6 +3104,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring and
* completed requests.
* @obj: object to flush
*/
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3099,7 +3130,9 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @DRM_IOCTL_ARGS: standard ioctl arguments
* @dev: drm device pointer
* @data: ioctl data blob
* @file: drm file pointer
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
@@ -3489,6 +3522,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
/**
* Finds free space in the GTT aperture and binds the object or a view of it
* there.
* @obj: object to bind
* @vm: address space to bind into
* @ggtt_view: global gtt view if applicable
* @alignment: requested alignment
* @flags: mask of PIN_* flags to use
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3746,6 +3784,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
/**
* Moves a single object to the GTT read, and possibly write domain.
* @obj: object to act on
* @write: ask for write access or read only
*
* This function returns when the move is complete, including waiting on
* flushes to occur.
@@ -3817,6 +3857,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/**
* Changes the cache-level of an object across all VMA.
* @obj: object to act on
* @cache_level: new cache level to set for the object
*
* After this function returns, the object will be in the new cache-level
* across all GTT and the contents of the backing storage will be coherent,
@@ -4098,6 +4140,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
/**
* Moves a single object to the CPU read, and possibly write domain.
* @obj: object to act on
* @write: requesting write or read-only access
*
* This function returns when the move is complete, including waiting on
* flushes to occur.

View File

@@ -588,7 +588,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev: drm device
* @dev_priv: i915 device private
*/
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
@@ -2517,7 +2517,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
/**
* i915_reset_and_wakeup - do process context error handling work
* @dev: drm device
* @dev_priv: i915 device private
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
@@ -2674,13 +2674,14 @@ static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
/**
* i915_handle_error - handle a gpu error
* @dev: drm device
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
* @fmt: Error message format string
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,

View File

@@ -53,7 +53,7 @@
/**
* i915_check_vgpu - detect virtual GPU
* @dev: drm device *
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
@@ -135,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
* @dev_priv: i915 device
* @dev: drm device
*
* This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as

View File

@@ -238,7 +238,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
* @dev_priv: i915 device private
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
@@ -516,7 +516,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
* @engine: Engine Command Streamer to handle.
* @data: tasklet handler passed in unsigned long
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
@@ -786,15 +786,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
* @file: DRM file.
* @ring: Engine Command Streamer to submit to.
* @ctx: Context to employ for this submission.
* @params: execbuffer call parameters.
* @args: execbuffer call arguments.
* @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer.
* @dispatch_flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
@@ -1138,7 +1132,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
/**
* gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
*
* @ring: only applicable for RCS
* @engine: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
* offset: specifies start of the batch, should be cache-aligned. This is updated
* with the offset value received as input.
@@ -1212,7 +1206,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
/**
* gen8_init_perctx_bb() - initialize per ctx batch with WA
*
* @ring: only applicable for RCS
* @engine: only applicable for RCS
* @wa_ctx: structure representing wa_ctx
* offset: specifies start of the batch, should be cache-aligned.
* size: size of the batch in DWORDS but HW expects in terms of cachelines
@@ -1860,7 +1854,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
* @ring: Engine Command Streamer.
* @engine: Engine Command Streamer.
*
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
@@ -2413,7 +2407,7 @@ populate_lr_context(struct i915_gem_context *ctx,
/**
* intel_lr_context_size() - return the size of the context for an engine
* @ring: which engine to find the context size for
* @engine: which engine to find the context size for
*
* Each engine may require a different amount of space for a context image,
* so when allocating (or copying) an image, this function can be used to

View File

@@ -2448,6 +2448,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @dev_priv: i915 device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
* power domains using intel_display_set_init_power().