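| // SPDX-License-Identifier: GPL-2.0-only |
| /* |
|  *  kernel/sched/cpudeadline.c |
|  * |
|  *  Global CPU deadline management. |
|  * |
|  *  cp->elements[] does double duty: slots [0 .. size) form a max-heap |
|  *  of (cpu, dl) pairs keyed on each CPU's current deadline, so the |
|  *  root is the CPU whose deadline is the latest; elements[cpu].idx |
|  *  maps a CPU number back to its position in the heap (IDX_INVALID |
|  *  when the CPU is not enqueued). |
|  */ |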
| #include "sched.h" |
| |
| static inline int parent(int i) |
| { |
| <------>return (i - 1) >> 1; |
| } |
| |
| static inline int left_child(int i) |
| { |
| <------>return (i << 1) + 1; |
| } |
| |
| static inline int right_child(int i) |
| { |
| <------>return (i << 1) + 2; |
| } |
| |
| static void cpudl_heapify_down(struct cpudl *cp, int idx) |
| { |
| <------>int l, r, largest; |
| |
| <------>int orig_cpu = cp->elements[idx].cpu; |
| <------>u64 orig_dl = cp->elements[idx].dl; |
| |
| <------>if (left_child(idx) >= cp->size) |
| <------><------>return; |
| |
| <------>/* Sift the saved (cpu, dl) pair down toward the leaves. */ |
| <------>while (1) { |
| <------><------>u64 largest_dl; |
| |
| <------><------>l = left_child(idx); |
| <------><------>r = right_child(idx); |
| <------><------>largest = idx; |
| <------><------>largest_dl = orig_dl; |
| |
| <------><------>if ((l < cp->size) && dl_time_before(orig_dl, |
| <------><------><------><------><------><------>cp->elements[l].dl)) { |
| <------><------><------>largest = l; |
| <------><------><------>largest_dl = cp->elements[l].dl; |
| <------><------>} |
| <------><------>if ((r < cp->size) && dl_time_before(largest_dl, |
| <------><------><------><------><------><------>cp->elements[r].dl)) |
| <------><------><------>largest = r; |
| |
| <------><------>if (largest == idx) |
| <------><------><------>break; |
| |
| <------><------>/* Pull the largest child up onto idx. */ |
| <------><------>cp->elements[idx].cpu = cp->elements[largest].cpu; |
| <------><------>cp->elements[idx].dl = cp->elements[largest].dl; |
| <------><------>cp->elements[cp->elements[idx].cpu].idx = idx; |
| <------><------>idx = largest; |
| <------>} |
| <------>/* Place the saved original values in their final slot. */ |
| <------>cp->elements[idx].cpu = orig_cpu; |
| <------>cp->elements[idx].dl = orig_dl; |
| <------>cp->elements[cp->elements[idx].cpu].idx = idx; |
| } |
| |
| static void cpudl_heapify_up(struct cpudl *cp, int idx) |
| { |
| <------>int p; |
| |
| <------>int orig_cpu = cp->elements[idx].cpu; |
| <------>u64 orig_dl = cp->elements[idx].dl; |
| |
| <------>if (idx == 0) |
| <------><------>return; |
| |
| <------>do { |
| <------><------>p = parent(idx); |
| <------><------>if (dl_time_before(orig_dl, cp->elements[p].dl)) |
| <------><------><------>break; |
| <------><------>/* Pull the parent down onto idx. */ |
| <------><------>cp->elements[idx].cpu = cp->elements[p].cpu; |
| <------><------>cp->elements[idx].dl = cp->elements[p].dl; |
| <------><------>cp->elements[cp->elements[idx].cpu].idx = idx; |
| <------><------>idx = p; |
| <------>} while (idx != 0); |
| <------>/* Place the saved original values in their final slot. */ |
| <------>cp->elements[idx].cpu = orig_cpu; |
| <------>cp->elements[idx].dl = orig_dl; |
| <------>cp->elements[cp->elements[idx].cpu].idx = idx; |
| } |
| |
| static void cpudl_heapify(struct cpudl *cp, int idx) |
| { |
| <------>if (idx > 0 && dl_time_before(cp->elements[parent(idx)].dl, |
| <------><------><------><------>cp->elements[idx].dl)) |
| <------><------>cpudl_heapify_up(cp, idx); |
| <------>else |
| <------><------>cpudl_heapify_down(cp, idx); |
| } |
| |
| static inline int cpudl_maximum(struct cpudl *cp) |
| { |
| <------>return cp->elements[0].cpu; |
| } |
| |
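| /* |
|  * cpudl_find - find the best (later-dl) CPU in the system |
|  * @cp: the cpudl max-heap context |
|  * @p: the task |
|  * @later_mask: a mask to fill in with the selected CPUs (or NULL) |
|  * |
|  * Returns: 1 if suitable CPUs were found, 0 otherwise. |
|  */ |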
| int cpudl_find(struct cpudl *cp, struct task_struct *p, |
| <------> struct cpumask *later_mask) |
| { |
| <------>const struct sched_dl_entity *dl_se = &p->dl; |
| |
| <------>if (later_mask && |
| <------> cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) { |
| <------><------>unsigned long cap, max_cap = 0; |
| <------><------>int cpu, max_cpu = -1; |
| |
| <------><------>if (!static_branch_unlikely(&sched_asym_cpucapacity)) |
| <------><------><------>return 1; |
| |
| <------><------>/* Ensure the capacity of the CPUs fits the task. */ |
| <------><------>for_each_cpu(cpu, later_mask) { |
| <------><------><------>if (!dl_task_fits_capacity(p, cpu)) { |
| <------><------><------><------>cpumask_clear_cpu(cpu, later_mask); |
| |
| <------><------><------><------>cap = capacity_orig_of(cpu); |
| |
| <------><------><------><------>if (cap > max_cap || |
| <------><------><------><------> (cpu == task_cpu(p) && cap == max_cap)) { |
| <------><------><------><------><------>max_cap = cap; |
| <------><------><------><------><------>max_cpu = cpu; |
| <------><------><------><------>} |
| <------><------><------>} |
| <------><------>} |
| |
| <------><------>if (cpumask_empty(later_mask)) |
| <------><------><------>cpumask_set_cpu(max_cpu, later_mask); |
| |
| <------><------>return 1; |
| <------>} else { |
| <------><------>int best_cpu = cpudl_maximum(cp); |
| |
| <------><------>WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); |
| |
| <------><------>if (cpumask_test_cpu(best_cpu, p->cpus_ptr) && |
| <------><------> dl_time_before(dl_se->deadline, cp->elements[0].dl)) { |
| <------><------><------>if (later_mask) |
| <------><------><------><------>cpumask_set_cpu(best_cpu, later_mask); |
| |
| <------><------><------>return 1; |
| <------><------>} |
| <------>} |
| <------>return 0; |
| } |
| |
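| /* |
|  * cpudl_clear - remove a CPU from the cpudl max-heap |
|  * @cp: the cpudl max-heap context |
|  * @cpu: the target CPU |
|  * |
|  * Notes: assumes cpu_rq(cpu)->lock is locked |
|  * |
|  * Returns: (void) |
|  */ |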
| void cpudl_clear(struct cpudl *cp, int cpu) |
| { |
| <------>int old_idx, new_cpu; |
| <------>unsigned long flags; |
| |
| <------>WARN_ON(!cpu_present(cpu)); |
| |
| <------>raw_spin_lock_irqsave(&cp->lock, flags); |
| |
| <------>old_idx = cp->elements[cpu].idx; |
| <------>if (old_idx == IDX_INVALID) { |
| <------><------>/* |
| <------><------> * Nothing to remove if old_idx was invalid. |
| <------><------> * This could happen if a rq_offline_dl is |
| <------><------> * called for a CPU without -dl tasks running. |
| <------><------> */ |
| <------>} else { |
| <------><------>new_cpu = cp->elements[cp->size - 1].cpu; |
| <------><------>cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl; |
| <------><------>cp->elements[old_idx].cpu = new_cpu; |
| <------><------>cp->size--; |
| <------><------>cp->elements[new_cpu].idx = old_idx; |
| <------><------>cp->elements[cpu].idx = IDX_INVALID; |
| <------><------>cpudl_heapify(cp, old_idx); |
| |
| <------><------>cpumask_set_cpu(cpu, cp->free_cpus); |
| <------>} |
| <------>raw_spin_unlock_irqrestore(&cp->lock, flags); |
| } |
| |
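| /* |
|  * cpudl_set - update the cpudl max-heap |
|  * @cp: the cpudl max-heap context |
|  * @cpu: the target CPU |
|  * @dl: the new earliest deadline for this CPU |
|  * |
|  * Notes: assumes cpu_rq(cpu)->lock is locked |
|  * |
|  * Returns: (void) |
|  */ |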
| void cpudl_set(struct cpudl *cp, int cpu, u64 dl) |
| { |
| <------>int old_idx; |
| <------>unsigned long flags; |
| |
| <------>WARN_ON(!cpu_present(cpu)); |
| |
| <------>raw_spin_lock_irqsave(&cp->lock, flags); |
| |
| <------>old_idx = cp->elements[cpu].idx; |
| <------>if (old_idx == IDX_INVALID) { |
| <------><------>int new_idx = cp->size++; |
| |
| <------><------>cp->elements[new_idx].dl = dl; |
| <------><------>cp->elements[new_idx].cpu = cpu; |
| <------><------>cp->elements[cpu].idx = new_idx; |
| <------><------>cpudl_heapify_up(cp, new_idx); |
| <------><------>cpumask_clear_cpu(cpu, cp->free_cpus); |
| <------>} else { |
| <------><------>cp->elements[old_idx].dl = dl; |
| <------><------>cpudl_heapify(cp, old_idx); |
| <------>} |
| |
| <------>raw_spin_unlock_irqrestore(&cp->lock, flags); |
| } |
| |
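| /* |
|  * cpudl_set_freecpu - Set the cpudl.free_cpus |
|  * @cp: the cpudl max-heap context |
|  * @cpu: the CPU to mark as free in cp->free_cpus |
|  */ |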
| void cpudl_set_freecpu(struct cpudl *cp, int cpu) |
| { |
| <------>cpumask_set_cpu(cpu, cp->free_cpus); |
| } |
| |
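| /* |
|  * cpudl_clear_freecpu - Clear the cpudl.free_cpus |
|  * @cp: the cpudl max-heap context |
|  * @cpu: the CPU to clear from cp->free_cpus |
|  */ |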
| void cpudl_clear_freecpu(struct cpudl *cp, int cpu) |
| { |
| <------>cpumask_clear_cpu(cpu, cp->free_cpus); |
| } |
| |
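| /* |
|  * cpudl_init - initialize the cpudl structure |
|  * @cp: the cpudl max-heap context |
|  * |
|  * Returns: 0 on success, -ENOMEM if the elements array or the |
|  * free_cpus mask cannot be allocated. |
|  */ |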
| int cpudl_init(struct cpudl *cp) |
| { |
| <------>int i; |
| |
| <------>raw_spin_lock_init(&cp->lock); |
| <------>cp->size = 0; |
| |
| <------>cp->elements = kcalloc(nr_cpu_ids, |
| <------><------><------> sizeof(struct cpudl_item), |
| <------><------><------> GFP_KERNEL); |
| <------>if (!cp->elements) |
| <------><------>return -ENOMEM; |
| |
| <------>if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) { |
| <------><------>kfree(cp->elements); |
| <------><------>return -ENOMEM; |
| <------>} |
| |
| <------>for_each_possible_cpu(i) |
| <------><------>cp->elements[i].idx = IDX_INVALID; |
| |
| <------>return 0; |
| } |
| |
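| /* |
|  * cpudl_cleanup - clean up the cpudl structure |
|  * @cp: the cpudl max-heap context |
|  */ |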
| void cpudl_cleanup(struct cpudl *cp) |
| { |
| <------>free_cpumask_var(cp->free_cpus); |
| <------>kfree(cp->elements); |
| } |
| |