mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 22:02:02 +00:00
drm/sched: Convert drm scheduler to use a work queue rather than kthread
In Xe, the new Intel GPU driver, a choice was made to have a 1 to 1 mapping between a drm_gpu_scheduler and drm_sched_entity. At first this seems a bit odd, but the reasoning is as follows.

1. In Xe the submission order from multiple drm_sched_entity is not guaranteed to match the completion order, even when targeting the same hardware engine. This is because Xe has a firmware scheduler, the GuC, which is allowed to reorder, timeslice, and preempt submissions. If a shared drm_gpu_scheduler is used across multiple drm_sched_entity, the TDR falls apart, as the TDR expects submission order == completion order. Using a dedicated drm_gpu_scheduler per drm_sched_entity solves this problem.

2. In Xe submissions are done via programming a ring buffer (circular buffer). A drm_gpu_scheduler provides a limit on the number of in-flight jobs, so if that limit is set to RING_SIZE / MAX_SIZE_PER_JOB we get flow control on the ring for free (sketched below).

A problem with this design is that currently a drm_gpu_scheduler uses a kthread for submission / job cleanup. This doesn't scale if a large number of drm_gpu_scheduler are used. To work around the scaling issue, use a work item rather than a kthread for submission / job cleanup.

v2:
  - (Rob Clark) Fix msm build
  - Pass in run work queue
v3:
  - (Boris) don't have loop in worker
v4:
  - (Tvrtko) break out submit ready, stop, start helpers into own patch
v5:
  - (Boris) default to ordered work queue
v6:
  - (Luben / checkpatch) fix alignment in msm_ringbuffer.c
  - (Luben) s/drm_sched_submit_queue/drm_sched_wqueue_enqueue
  - (Luben) Update comment for drm_sched_wqueue_enqueue
  - (Luben) Positive check for submit_wq in drm_sched_init
  - (Luben) s/alloc_submit_wq/own_submit_wq
v7:
  - (Luben) s/drm_sched_wqueue_enqueue/drm_sched_run_job_queue
v8:
  - (Luben) Adjust var names / comments

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-3-matthew.brost@intel.com
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
This commit is contained in:
parent 35963cf2cd
commit a6149f0393

9 changed files with 87 additions and 82 deletions
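To make points 1 and 2 concrete, here is a minimal driver-side sketch, not part of this patch: my_queue_init, my_sched_ops, MY_RING_SIZE, and MY_MAX_JOB_SIZE are hypothetical names, but the drm_sched_init() call matches the signature this patch introduces.

	/* Hypothetical sketch: one scheduler per entity, with the job limit
	 * sized so the set of in-flight jobs can never overflow the ring. */
	extern const struct drm_sched_backend_ops my_sched_ops; /* hypothetical */

	#define MY_RING_SIZE		0x4000
	#define MY_MAX_JOB_SIZE		0x100

	static int my_queue_init(struct drm_gpu_scheduler *sched,
				 struct drm_sched_entity *entity,
				 struct device *dev)
	{
		int ret;

		/* NULL submit_wq: the scheduler allocates its own ordered wq */
		ret = drm_sched_init(sched, &my_sched_ops, NULL,
				     DRM_SCHED_PRIORITY_COUNT,
				     MY_RING_SIZE / MY_MAX_JOB_SIZE, /* hw_submission */
				     0, MAX_SCHEDULE_TIMEOUT, NULL, NULL,
				     "my_queue", dev);
		if (ret)
			return ret;

		/* 1:1 mapping: this entity is the scheduler's only client, so
		 * submission order == completion order holds per scheduler. */
		return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
					     &sched, 1, NULL);
	}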
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2279,7 +2279,7 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 			break;
 		}
 
-		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL,
 				   DRM_SCHED_PRIORITY_COUNT,
 				   ring->num_hw_submission, 0,
 				   timeout, adev->reset_domain->wq,
drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -134,7 +134,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 {
 	int ret;
 
-	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
 			     msecs_to_jiffies(500), NULL, NULL,
drivers/gpu/drm/lima/lima_sched.c
@@ -488,7 +488,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 
 	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
 
-	return drm_sched_init(&pipe->base, &lima_sched_ops,
+	return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
 			      DRM_SCHED_PRIORITY_COUNT,
 			      1,
 			      lima_job_hang_limit,
drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -94,7 +94,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 	 /* currently managing hangcheck ourselves: */
 	sched_timeout = MAX_SCHEDULE_TIMEOUT;
 
-	ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+	ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     num_hw_submissions, 0, sched_timeout,
 			     NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -429,7 +429,7 @@ int nouveau_sched_init(struct nouveau_drm *drm)
 	if (!drm->sched_wq)
 		return -ENOMEM;
 
-	return drm_sched_init(sched, &nouveau_sched_ops,
+	return drm_sched_init(sched, &nouveau_sched_ops, NULL,
 			      DRM_SCHED_PRIORITY_COUNT,
 			      NOUVEAU_SCHED_HW_SUBMISSIONS, 0, job_hang_limit,
 			      NULL, NULL, "nouveau_sched", drm->dev->dev);
drivers/gpu/drm/panfrost/panfrost_job.c
@@ -852,7 +852,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
 
 		ret = drm_sched_init(&js->queue[j].sched,
-				     &panfrost_sched_ops,
+				     &panfrost_sched_ops, NULL,
 				     DRM_SCHED_PRIORITY_COUNT,
 				     nentries, 0,
 				     msecs_to_jiffies(JOB_TIMEOUT_MS),
drivers/gpu/drm/scheduler/sched_main.c
@@ -48,7 +48,6 @@
  * through the jobs entity pointer.
  */
 
-#include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
@@ -256,6 +255,16 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
 	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
 }
 
+/**
+ * drm_sched_run_job_queue - enqueue run-job work
+ * @sched: scheduler instance
+ */
+static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
+{
+	if (!READ_ONCE(sched->pause_submit))
+		queue_work(sched->submit_wq, &sched->work_run_job);
+}
+
 /**
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
@@ -275,7 +284,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
-	wake_up_interruptible(&sched->wake_up_worker);
+	drm_sched_run_job_queue(sched);
 }
 
 /**
@@ -874,7 +883,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
 void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
 {
 	if (drm_sched_can_queue(sched))
-		wake_up_interruptible(&sched->wake_up_worker);
+		drm_sched_run_job_queue(sched);
 }
 
 /**
@@ -985,60 +994,41 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 EXPORT_SYMBOL(drm_sched_pick_best);
 
 /**
- * drm_sched_blocked - check if the scheduler is blocked
+ * drm_sched_run_job_work - main scheduler thread
  *
- * @sched: scheduler instance
- *
- * Returns true if blocked, otherwise false.
+ * @w: run job work
  */
-static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
+static void drm_sched_run_job_work(struct work_struct *w)
 {
-	if (kthread_should_park()) {
-		kthread_parkme();
-		return true;
-	}
-
-	return false;
-}
-
-/**
- * drm_sched_main - main scheduler thread
- *
- * @param: scheduler instance
- *
- * Returns 0.
- */
-static int drm_sched_main(void *param)
-{
-	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
+	struct drm_gpu_scheduler *sched =
+		container_of(w, struct drm_gpu_scheduler, work_run_job);
+	struct drm_sched_entity *entity;
+	struct drm_sched_job *cleanup_job;
 	int r;
 
-	sched_set_fifo_low(current);
+	if (READ_ONCE(sched->pause_submit))
+		return;
 
-	while (!kthread_should_stop()) {
-		struct drm_sched_entity *entity = NULL;
+	cleanup_job = drm_sched_get_cleanup_job(sched);
+	entity = drm_sched_select_entity(sched);
+
+	if (!entity && !cleanup_job)
+		return;	/* No more work */
+
+	if (cleanup_job)
+		sched->ops->free_job(cleanup_job);
+
+	if (entity) {
+		struct dma_fence *fence;
 		struct drm_sched_fence *s_fence;
 		struct drm_sched_job *sched_job;
-		struct dma_fence *fence;
-		struct drm_sched_job *cleanup_job = NULL;
 
-		wait_event_interruptible(sched->wake_up_worker,
-					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
-					 (!drm_sched_blocked(sched) &&
-					  (entity = drm_sched_select_entity(sched))) ||
-					 kthread_should_stop());
-
-		if (cleanup_job)
-			sched->ops->free_job(cleanup_job);
-
-		if (!entity)
-			continue;
-
 		sched_job = drm_sched_entity_pop_job(entity);
-
 		if (!sched_job) {
 			complete_all(&entity->entity_idle);
-			continue;
+			if (!cleanup_job)
+				return;	/* No more work */
+			goto again;
 		}
 
 		s_fence = sched_job->s_fence;
@@ -1069,7 +1059,9 @@ static int drm_sched_main(void *param)
 
 		wake_up(&sched->job_scheduled);
 	}
-	return 0;
+
+again:
+	drm_sched_run_job_queue(sched);
 }
 
 /**
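The `goto again` path replaces the kthread's while loop (per the v3 feedback to not loop in the worker): the work item runs one pass, then re-queues itself when more work may remain, and otherwise returns and leaves the workqueue idle. A standalone sketch of this self-requeue pattern, with hypothetical my_* names, not taken from the patch:

	/* Hypothetical sketch of the work-item pattern: run one unit of
	 * work, then re-arm, instead of looping in a dedicated thread. */
	#include <linux/workqueue.h>

	struct my_sched {
		struct workqueue_struct *wq;
		struct work_struct work;
		bool pause;
	};

	static bool my_run_one_job(struct my_sched *s); /* hypothetical */

	static void my_work_fn(struct work_struct *w)
	{
		struct my_sched *s = container_of(w, struct my_sched, work);

		if (READ_ONCE(s->pause))
			return;

		if (!my_run_one_job(s))
			return; /* no more work; a later queue_work() re-arms us */

		/* more work may remain: re-queue ourselves (cf. "again:" above) */
		queue_work(s->wq, &s->work);
	}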
@@ -1077,6 +1069,8 @@ static int drm_sched_main(void *param)
  *
  * @sched: scheduler instance
  * @ops: backend operations for this scheduler
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ *	       allocated and used
  * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
  * @hw_submission: number of hw submissions that can be in flight
  * @hang_limit: number of times to allow a job to hang before dropping it
@@ -1091,6 +1085,7 @@ static int drm_sched_main(void *param)
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
+		   struct workqueue_struct *submit_wq,
 		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
 		   long timeout, struct workqueue_struct *timeout_wq,
 		   atomic_t *score, const char *name, struct device *dev)
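The new third argument also lets a driver hand many schedulers one shared ordered workqueue instead of spawning a kthread each, which is the scaling win the commit message describes. A hedged sketch under assumed names (struct my_queue, my_sched_ops, and the limits are hypothetical, not drm API):

	/* Hypothetical: many 1:1 schedulers sharing a single ordered wq, so
	 * a large scheduler count costs one workqueue, not N kthreads. */
	static int my_queues_init(struct my_queue *q, unsigned int num_queues,
				  struct workqueue_struct **out_wq,
				  struct device *dev)
	{
		struct workqueue_struct *shared_wq;
		unsigned int i;
		int ret;

		shared_wq = alloc_ordered_workqueue("my-submit", 0);
		if (!shared_wq)
			return -ENOMEM;

		for (i = 0; i < num_queues; i++) {
			ret = drm_sched_init(&q[i].sched, &my_sched_ops,
					     shared_wq,
					     DRM_SCHED_PRIORITY_COUNT,
					     64, 0, MAX_SCHEDULE_TIMEOUT,
					     NULL, NULL, "my_queue", dev);
			if (ret) {
				while (i--)
					drm_sched_fini(&q[i].sched);
				destroy_workqueue(shared_wq);
				return ret;
			}
		}

		*out_wq = shared_wq;
		return 0;
	}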
@@ -1121,14 +1116,22 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		return 0;
 	}
 
+	if (submit_wq) {
+		sched->submit_wq = submit_wq;
+		sched->own_submit_wq = false;
+	} else {
+		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+		if (!sched->submit_wq)
+			return -ENOMEM;
+
+		sched->own_submit_wq = true;
+	}
+	ret = -ENOMEM;
 	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
 					GFP_KERNEL | __GFP_ZERO);
-	if (!sched->sched_rq) {
-		drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
-		return -ENOMEM;
-	}
+	if (!sched->sched_rq)
+		goto Out_free;
 	sched->num_rqs = num_rqs;
-	ret = -ENOMEM;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
@@ -1136,31 +1139,26 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		drm_sched_rq_init(sched, sched->sched_rq[i]);
 	}
 
-	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	INIT_LIST_HEAD(&sched->pending_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
+	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
 	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
-
-	/* Each scheduler will run on a seperate kernel thread */
-	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
-	if (IS_ERR(sched->thread)) {
-		ret = PTR_ERR(sched->thread);
-		sched->thread = NULL;
-		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
-		goto Out_unroll;
-	}
+	sched->pause_submit = false;
 
 	sched->ready = true;
 	return 0;
 Out_unroll:
 	for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
 		kfree(sched->sched_rq[i]);
+Out_free:
 	kfree(sched->sched_rq);
 	sched->sched_rq = NULL;
+	if (sched->own_submit_wq)
+		destroy_workqueue(sched->submit_wq);
 	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
 	return ret;
 }
@@ -1178,8 +1176,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	struct drm_sched_entity *s_entity;
 	int i;
 
-	if (sched->thread)
-		kthread_stop(sched->thread);
+	drm_sched_wqueue_stop(sched);
 
 	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
@@ -1202,6 +1199,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 	/* Confirm no work left behind accessing device structures */
 	cancel_delayed_work_sync(&sched->work_tdr);
 
+	if (sched->own_submit_wq)
+		destroy_workqueue(sched->submit_wq);
 	sched->ready = false;
 	kfree(sched->sched_rq);
 	sched->sched_rq = NULL;
@@ -1262,7 +1261,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
  */
 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
 {
-	return !!sched->thread;
+	return sched->ready;
 }
 EXPORT_SYMBOL(drm_sched_wqueue_ready);
 
@@ -1273,7 +1272,8 @@ EXPORT_SYMBOL(drm_sched_wqueue_ready);
  */
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
 {
-	kthread_park(sched->thread);
+	WRITE_ONCE(sched->pause_submit, true);
+	cancel_work_sync(&sched->work_run_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_stop);
 
@@ -1284,6 +1284,7 @@ EXPORT_SYMBOL(drm_sched_wqueue_stop);
  */
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
 {
-	kthread_unpark(sched->thread);
+	WRITE_ONCE(sched->pause_submit, false);
+	queue_work(sched->submit_wq, &sched->work_run_job);
 }
 EXPORT_SYMBOL(drm_sched_wqueue_start);
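drm_sched_wqueue_stop() and drm_sched_wqueue_start() take over the role that kthread_park()/kthread_unpark() played. A driver reset path using them might look like this hedged sketch (my_gpu and my_hw_reset are hypothetical):

	static void my_gpu_reset(struct my_gpu *gpu)
	{
		/* Sets pause_submit and cancels any queued run-job work, so
		 * no submission races with the reset. */
		drm_sched_wqueue_stop(&gpu->sched);

		my_hw_reset(gpu); /* hypothetical hardware reset */

		/* Clears pause_submit and kicks work_run_job to resume. */
		drm_sched_wqueue_start(&gpu->sched);
	}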
drivers/gpu/drm/v3d/v3d_sched.c
@@ -388,7 +388,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 	int ret;
 
 	ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
-			     &v3d_bin_sched_ops,
+			     &v3d_bin_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -397,7 +397,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 		return ret;
 
 	ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
-			     &v3d_render_sched_ops,
+			     &v3d_render_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -406,7 +406,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 		goto fail;
 
 	ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
-			     &v3d_tfu_sched_ops,
+			     &v3d_tfu_sched_ops, NULL,
 			     DRM_SCHED_PRIORITY_COUNT,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -416,7 +416,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 
 	if (v3d_has_csd(v3d)) {
 		ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
-				     &v3d_csd_sched_ops,
+				     &v3d_csd_sched_ops, NULL,
 				     DRM_SCHED_PRIORITY_COUNT,
 				     hw_jobs_limit, job_hang_limit,
 				     msecs_to_jiffies(hang_limit_ms), NULL,
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			goto fail;
 
 		ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
-				     &v3d_cache_clean_sched_ops,
+				     &v3d_cache_clean_sched_ops, NULL,
 				     DRM_SCHED_PRIORITY_COUNT,
 				     hw_jobs_limit, job_hang_limit,
 				     msecs_to_jiffies(hang_limit_ms), NULL,
include/drm/gpu_scheduler.h
@@ -475,17 +475,16 @@ struct drm_sched_backend_ops {
  * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
  *           as there's usually one run-queue per priority, but could be less.
  * @sched_rq: An allocated array of run-queues of size @num_rqs;
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- *                  is ready to be scheduled.
  * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
  *                 waits on this wait queue until all the scheduled jobs are
  *                 finished.
  * @hw_rq_count: the number of jobs currently in the hardware queue.
  * @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job
  * @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
  * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
  *            timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
  * @pending_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the pending_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
@@ -494,6 +493,8 @@ struct drm_sched_backend_ops {
  * @_score: score used when the driver doesn't provide one
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hit to time out handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
  * @dev: system &struct device
  *
  * One scheduler is implemented for each hardware ring.
@@ -505,13 +506,13 @@ struct drm_gpu_scheduler {
 	const char			*name;
 	u32				num_rqs;
 	struct drm_sched_rq		**sched_rq;
-	wait_queue_head_t		wake_up_worker;
 	wait_queue_head_t		job_scheduled;
 	atomic_t			hw_rq_count;
 	atomic64_t			job_id_count;
+	struct workqueue_struct		*submit_wq;
 	struct workqueue_struct		*timeout_wq;
+	struct work_struct		work_run_job;
 	struct delayed_work		work_tdr;
-	struct task_struct		*thread;
 	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
@@ -519,11 +520,14 @@ struct drm_gpu_scheduler {
 	atomic_t			_score;
 	bool				ready;
 	bool				free_guilty;
+	bool				pause_submit;
+	bool				own_submit_wq;
 	struct device			*dev;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
+		   struct workqueue_struct *submit_wq,
 		   u32 num_rqs, uint32_t hw_submission, unsigned int hang_limit,
 		   long timeout, struct workqueue_struct *timeout_wq,
 		   atomic_t *score, const char *name, struct device *dev);