staging: android: Fix checkpatch block comment warnings

This patch fixes the checkpatch warnings about block comment style in
the staging/android driver.
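For reference, the multi-line comment layout checkpatch asks for, and that
the hunks below convert to, opens and closes the comment on dedicated lines,
with an asterisk starting every line in between. A minimal sketch in C,
illustrative only and not part of this diff:

	/*
	 * Preferred kernel block comment style: the comment opens and
	 * closes on lines of their own, and every intermediate line
	 * starts with an asterisk.
	 */

The hunks below rewrite each flagged comment into this shape without
changing its wording.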

Signed-off-by: Sriram Raghunathan <sriram@marirs.net.in>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sriram Raghunathan 2015-09-22 22:35:51 +05:30 committed by Greg Kroah-Hartman
parent 56b4c04927
commit 7e4161747b
7 changed files with 52 additions and 32 deletions

@@ -1,5 +1,4 @@
/*
* drivers/staging/android/ion/compat_ion.h
*
* Copyright (C) 2013 Google, Inc.

@@ -1,5 +1,5 @@
/*
*
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
@@ -244,14 +244,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
/* this will set up dma addresses for the sglist -- it is not
technically correct as per the dma api -- a specific
device isn't really taking ownership here. However, in practice on
our systems the only dma_address space is physical addresses.
Additionally, we can't afford the overhead of invalidating every
allocation via dma_map_sg. The implicit contract here is that
memory coming from the heaps is ready for dma, ie if it has a
cached mapping that mapping has been invalidated */
/*
* this will set up dma addresses for the sglist -- it is not
* technically correct as per the dma api -- a specific
* device isn't really taking ownership here. However, in practice on
* our systems the only dma_address space is physical addresses.
* Additionally, we can't afford the overhead of invalidating every
* allocation via dma_map_sg. The implicit contract here is that
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
mutex_lock(&dev->buffer_lock);
@@ -753,8 +755,10 @@ struct ion_client *ion_client_create(struct ion_device *dev,
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
/* don't bother to store task struct for kernel threads,
they can't be killed anyway */
/*
* don't bother to store task struct for kernel threads,
* they can't be killed anyway
*/
if (current->group_leader->flags & PF_KTHREAD) {
put_task_struct(current->group_leader);
task = NULL;
@@ -1521,8 +1525,10 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
heap->dev = dev;
down_write(&dev->lock);
/* use negative heap->id to reverse the priority -- when traversing
the list later attempt higher id numbers first */
/*
* use negative heap->id to reverse the priority -- when traversing
* the list later attempt higher id numbers first
*/
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
debug_file = debugfs_create_file(heap->name, 0664,

@@ -28,10 +28,12 @@ struct ion_mapper;
struct ion_client;
struct ion_buffer;
/* This should be removed some day when phys_addr_t's are fully
plumbed in the kernel, and all instances of ion_phys_addr_t should
be converted to phys_addr_t. For the time being many kernel interfaces
do not accept phys_addr_t's that would have to */
/*
* This should be removed some day when phys_addr_t's are fully
* plumbed in the kernel, and all instances of ion_phys_addr_t should
* be converted to phys_addr_t. For the time being many kernel interfaces
* do not accept phys_addr_t's that would have to
*/
#define ion_phys_addr_t unsigned long
/**

@@ -180,8 +180,10 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
return ERR_PTR(-ENOMEM);
cma_heap->heap.ops = &ion_cma_ops;
/* get device from private heaps data, later it will be
* used to make the link with reserved CMA memory */
/*
* get device from private heaps data, later it will be
* used to make the link with reserved CMA memory
*/
cma_heap->dev = data->priv;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;

@@ -346,7 +346,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
* invalidated from the cache, provides a significant performance benefit on
* many systems */
* many systems
*/
/**
* struct ion_page_pool - pagepool struct

@@ -185,8 +185,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
struct scatterlist *sg;
int i;
/* uncached pages come from the page pools, zero them before returning
for security purposes (other allocations are zerod at alloc time */
/*
* uncached pages come from the page pools, zero them before returning
* for security purposes (other allocations are zerod at
* alloc time
*/
if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);

@@ -40,8 +40,10 @@ enum ion_heap_type {
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_HEAP_TYPE_CUSTOM, /*
* must be last so device specific heaps always
* are at the end of this enum
*/
ION_NUM_HEAPS = 16,
};
@@ -56,13 +58,18 @@ enum ion_heap_type {
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
cached, ion will do cache
maintenance when the buffer is
mapped for dma */
#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created
at mmap time, if this is set
caches must be managed manually */
#define ION_FLAG_CACHED 1 /*
* mappings of this buffer should be
* cached, ion will do cache
* maintenance when the buffer is
* mapped for dma
*/
#define ION_FLAG_CACHED_NEEDS_SYNC 2 /*
* mappings of this buffer will created
* at mmap time, if this is set
* caches must be managed
* manually
*/
/**
* DOC: Ion Userspace API