x86/vector: Rename send_cleanup_vector() to vector_schedule_cleanup()

Rename send_cleanup_vector() to vector_schedule_cleanup() to prepare for
replacing the vector cleanup IPI with a timer callback.
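Once the callers are converted, the cleanup request can be queued on the
vector's previous target CPU and handled from a timer callback instead of
sending IRQ_MOVE_CLEANUP_VECTOR. A minimal sketch of that direction,
assuming a per-CPU cleanup list and per-CPU timer (the names and details
below are illustrative, not the actual follow-up change):

  /* Sketch only: queue the stale vector on its previous target CPU. */
  static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
  static DEFINE_PER_CPU(struct timer_list, cleanup_timer);

  static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
  {
          unsigned int cpu = apicd->prev_cpu;

          raw_spin_lock(&vector_lock);
          if (cpu_online(cpu)) {
                  struct timer_list *tmr = per_cpu_ptr(&cleanup_timer, cpu);

                  hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
                  /* Arm the target CPU's timer unless it is already pending. */
                  if (!timer_pending(tmr)) {
                          tmr->expires = jiffies + 1;
                          add_timer_on(tmr, cpu);
                  }
          }
          raw_spin_unlock(&vector_lock);
  }

The call sites stay unchanged by the rename; only the mechanism behind
vector_schedule_cleanup() is meant to change later.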

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Xin Li <xin3.li@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steve Wahl <steve.wahl@hpe.com>
Link: https://lore.kernel.org/r/20230621171248.6805-2-xin3.li@intel.com
commit a539cc86a1 (parent 5d0c230f1d)
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2023-06-21 10:12:46 -07:00

 6 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -97,10 +97,10 @@ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
 #ifdef CONFIG_SMP
-extern void send_cleanup_vector(struct irq_cfg *);
+extern void vector_schedule_cleanup(struct irq_cfg *);
 extern void irq_complete_move(struct irq_cfg *cfg);
 #else
-static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
 static inline void irq_complete_move(struct irq_cfg *c) { }
 #endif

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -967,7 +967,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
 	raw_spin_unlock(&vector_lock);
 }

-static void __send_cleanup_vector(struct apic_chip_data *apicd)
+static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
 {
 	unsigned int cpu;

@@ -983,13 +983,13 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd)
 	raw_spin_unlock(&vector_lock);
 }

-void send_cleanup_vector(struct irq_cfg *cfg)
+void vector_schedule_cleanup(struct irq_cfg *cfg)
 {
 	struct apic_chip_data *apicd;

 	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
 	if (apicd->move_in_progress)
-		__send_cleanup_vector(apicd);
+		__vector_schedule_cleanup(apicd);
 }

 void irq_complete_move(struct irq_cfg *cfg)
@@ -1007,7 +1007,7 @@ void irq_complete_move(struct irq_cfg *cfg)
 	 * on the same CPU.
 	 */
 	if (apicd->cpu == smp_processor_id())
-		__send_cleanup_vector(apicd);
+		__vector_schedule_cleanup(apicd);
 }

 /*

diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -58,7 +58,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret >= 0) {
 		uv_program_mmr(cfg, data->chip_data);
-		send_cleanup_vector(cfg);
+		vector_schedule_cleanup(cfg);
 	}

 	return ret;

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3681,7 +3681,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	 * at the new destination. So, time to cleanup the previous
 	 * vector allocation.
 	 */
-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);

 	return IRQ_SET_MASK_OK_DONE;
 }

diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -51,7 +51,7 @@ static int hyperv_ir_set_affinity(struct irq_data *data,
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;

-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);

 	return 0;
 }
@@ -257,7 +257,7 @@ static int hyperv_root_ir_set_affinity(struct irq_data *data,
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;

-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);

 	return 0;
 }

diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1176,7 +1176,7 @@ intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	 * at the new destination. So, time to cleanup the previous
 	 * vector allocation.
 	 */
-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);

 	return IRQ_SET_MASK_OK_DONE;
 }