net/sched: taprio: rename close_time to end_time

There is a terminology confusion in taprio: what is called "close_time"
is actually used for 2 things (see the sketch after this list):

1. determining when an entry "closes" such that transmitted skbs are
   never allowed to overrun that time (?!)
2. an aid for determining when to advance and/or restart the schedule
   using the hrtimer
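
For orientation, here is a minimal, self-contained sketch of those two
roles. It is not the kernel code: plain int64_t nanoseconds stand in for
ktime_t, re-arming the hrtimer is modelled by returning the next expiry
value, and the names gate_entry, may_transmit and advance_entry are
invented for illustration (only the end_time/interval fields mirror the
taprio ones visible in the diff below).

/* Illustrative model only, not taprio code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gate_entry {
	int64_t end_time;	/* instant this entry ends and the next one opens (ns) */
	int64_t interval;	/* how long the entry stays open (ns) */
};

/* Role 1: transmit cutoff. A frame whose transmission would still be
 * in flight at end_time must not be dequeued.
 */
static bool may_transmit(const struct gate_entry *entry, int64_t now,
			 int64_t tx_duration)
{
	int64_t guard = now + tx_duration;	/* when the frame would finish */

	return guard <= entry->end_time;
}

/* Role 2: schedule advance. The next entry's end_time is derived from
 * the current one, and that same value is what the timer that advances
 * the schedule gets re-armed with.
 */
static int64_t advance_entry(struct gate_entry *cur, struct gate_entry *next)
{
	next->end_time = cur->end_time + next->interval;

	return next->end_time;	/* the value the advance timer is re-armed with */
}

int main(void)
{
	struct gate_entry cur = { .end_time = 1000000, .interval = 1000000 };
	struct gate_entry next = { .interval = 500000 };

	/* 950 us + 100 us overruns the 1 ms end_time, so transmission is denied */
	printf("may transmit: %d\n", may_transmit(&cur, 950000, 100000));
	printf("next timer expiry: %lld ns\n", (long long)advance_entry(&cur, &next));
	return 0;
}

In the real qdisc, the first role appears as the ktime_after(guard,
entry->end_time) check in taprio_dequeue_from_txq(), and the second as
hrtimer_set_expires(&q->advance_timer, end_time) in advance_sched(),
both visible in the diff below.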

It makes more sense to call this so-called "close_time" "end_time",
because it's not clear at all to me what "closes". Future patches will
hopefully make better use of the term "to close".

This is an absolutely mechanical change.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Vladimir Oltean 2023-02-07 15:54:32 +02:00, committed by David S. Miller
Parent: a306a90c8f
Commit: e551755111
1 changed file with 26 additions and 26 deletions


@@ -45,11 +45,11 @@ struct sched_entry {
 	u64 gate_duration[TC_MAX_QUEUE];
 	struct list_head list;
 
-	/* The instant that this entry "closes" and the next one
+	/* The instant that this entry ends and the next one
 	 * should open, the qdisc will make some effort so that no
 	 * packet leaves after this time.
 	 */
-	ktime_t close_time;
+	ktime_t end_time;
 	ktime_t next_txtime;
 	atomic_t budget;
 	int index;
@@ -66,7 +66,7 @@ struct sched_gate_list {
 	struct rcu_head rcu;
 	struct list_head entries;
 	size_t num_entries;
-	ktime_t cycle_close_time;
+	ktime_t cycle_end_time;
 	s64 cycle_time;
 	s64 cycle_time_extension;
 	s64 base_time;
@@ -606,7 +606,7 @@ static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
 	 * guard band ...
 	 */
 	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-	    ktime_after(guard, entry->close_time))
+	    ktime_after(guard, entry->end_time))
 		return NULL;
 
 	/* ... and no budget. */
@@ -738,7 +738,7 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 	if (list_is_last(&entry->list, &oper->entries))
 		return true;
 
-	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
+	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
 		return true;
 
 	return false;
@@ -746,7 +746,7 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 
 static bool should_change_schedules(const struct sched_gate_list *admin,
 				    const struct sched_gate_list *oper,
-				    ktime_t close_time)
+				    ktime_t end_time)
 {
 	ktime_t next_base_time, extension_time;
 
@@ -755,18 +755,18 @@ static bool should_change_schedules(const struct sched_gate_list *admin,
 
 	next_base_time = sched_base_time(admin);
 
-	/* This is the simple case, the close_time would fall after
+	/* This is the simple case, the end_time would fall after
 	 * the next schedule base_time.
 	 */
-	if (ktime_compare(next_base_time, close_time) <= 0)
+	if (ktime_compare(next_base_time, end_time) <= 0)
 		return true;
 
-	/* This is the cycle_time_extension case, if the close_time
+	/* This is the cycle_time_extension case, if the end_time
 	 * plus the amount that can be extended would fall after the
 	 * next schedule base_time, we can extend the current schedule
 	 * for that amount.
 	 */
-	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
+	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);
 
 	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
 	 * how precisely the extension should be made. So after
@@ -785,7 +785,7 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 	struct sched_gate_list *oper, *admin;
 	struct sched_entry *entry, *next;
 	struct Qdisc *sch = q->root;
-	ktime_t close_time;
+	ktime_t end_time;
 
 	spin_lock(&q->current_entry_lock);
 	entry = rcu_dereference_protected(q->current_entry,
@@ -804,41 +804,41 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 	 * entry of all schedules are pre-calculated during the
 	 * schedule initialization.
 	 */
-	if (unlikely(!entry || entry->close_time == oper->base_time)) {
+	if (unlikely(!entry || entry->end_time == oper->base_time)) {
 		next = list_first_entry(&oper->entries, struct sched_entry,
 					list);
-		close_time = next->close_time;
+		end_time = next->end_time;
 		goto first_run;
 	}
 
 	if (should_restart_cycle(oper, entry)) {
 		next = list_first_entry(&oper->entries, struct sched_entry,
 					list);
-		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
-						      oper->cycle_time);
+		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
+						    oper->cycle_time);
 	} else {
 		next = list_next_entry(entry, list);
 	}
 
-	close_time = ktime_add_ns(entry->close_time, next->interval);
-	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
+	end_time = ktime_add_ns(entry->end_time, next->interval);
+	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);
 
-	if (should_change_schedules(admin, oper, close_time)) {
+	if (should_change_schedules(admin, oper, end_time)) {
 		/* Set things so the next time this runs, the new
 		 * schedule runs.
 		 */
-		close_time = sched_base_time(admin);
+		end_time = sched_base_time(admin);
 		switch_schedules(q, &admin, &oper);
 	}
 
-	next->close_time = close_time;
+	next->end_time = end_time;
 	taprio_set_budget(q, next);
 
 first_run:
 	rcu_assign_pointer(q->current_entry, next);
 	spin_unlock(&q->current_entry_lock);
 
-	hrtimer_set_expires(&q->advance_timer, close_time);
+	hrtimer_set_expires(&q->advance_timer, end_time);
 
 	rcu_read_lock();
 	__netif_schedule(sch);
@@ -1076,8 +1076,8 @@ static int taprio_get_start_time(struct Qdisc *sch,
 	return 0;
 }
 
-static void setup_first_close_time(struct taprio_sched *q,
-				   struct sched_gate_list *sched, ktime_t base)
+static void setup_first_end_time(struct taprio_sched *q,
+				 struct sched_gate_list *sched, ktime_t base)
 {
 	struct sched_entry *first;
 	ktime_t cycle;
@@ -1088,9 +1088,9 @@ static void setup_first_close_time(struct taprio_sched *q,
 	cycle = sched->cycle_time;
 
 	/* FIXME: find a better place to do this */
-	sched->cycle_close_time = ktime_add_ns(base, cycle);
+	sched->cycle_end_time = ktime_add_ns(base, cycle);
 
-	first->close_time = ktime_add_ns(base, first->interval);
+	first->end_time = ktime_add_ns(base, first->interval);
 	taprio_set_budget(q, first);
 	rcu_assign_pointer(q->current_entry, NULL);
 }
@@ -1756,7 +1756,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		if (admin)
 			call_rcu(&admin->rcu, taprio_free_sched_cb);
 	} else {
-		setup_first_close_time(q, new_admin, start);
+		setup_first_end_time(q, new_admin, start);
 
 		/* Protects against advance_sched() */
 		spin_lock_irqsave(&q->current_entry_lock, flags);