Library rt.implementation.apa.schedule
Require Import rt.util.all.
Require Import rt.model.apa.job rt.model.apa.task rt.model.apa.affinity
rt.model.apa.arrival_sequence rt.model.apa.schedule
rt.model.apa.platform rt.model.apa.priority.
Module ConcreteScheduler.
Import Job SporadicTaskset ArrivalSequence Schedule Platform
Priority Affinity.
(* In this section, we implement a concrete weak APA scheduler. *)
Section Implementation.
Context {Job: eqType}.
Context {sporadic_task: eqType}.
Variable job_cost: Job → time.
Variable job_task: Job → sporadic_task.
(* Let num_cpus denote the number of processors, ...*)
Variable num_cpus: nat.
(* ... and let arr_seq be any arrival sequence.*)
Variable arr_seq: arrival_sequence Job.
(* Let alpha be an affinity associated with each task. *)
Variable alpha: task_affinity sporadic_task num_cpus.
(* Assume a JLDP policy is given. *)
Variable higher_eq_priority: JLDP_policy arr_seq.
(* Consider the list of pending jobs at time t, ... *)
Definition jobs_pending_at (sched: schedule num_cpus arr_seq) (t: time) :=
[seq j <- jobs_arrived_up_to arr_seq t | pending job_cost sched j t].
(* ... which we sort by decreasing priority. *)
Definition sorted_pending_jobs (sched: schedule num_cpus arr_seq) (t: time) :=
sort (higher_eq_priority t) (jobs_pending_at sched t).
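(* For intuition, here is a toy, standalone illustration of sorting with a
"higher-or-equal priority" relation (purely illustrative; it only relies on the
ssreflect basics already loaded via rt.util.all and models priorities as plain
nats, where a smaller number means higher priority): *)
Definition toy_higher_eq_prio (a b: nat) : bool := a <= b.
Example toy_sort_by_priority :
  sort toy_higher_eq_prio [:: 5; 2; 9; 2] = [:: 2; 2; 5; 9].
Proof. reflexivity. Qed.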
(* Now we implement the algorithm that generates the APA schedule. *)
(* Given a job j at time t, we first define a predicate that states
whether j should be scheduled on an entry (cpu, x) of the mapping, where x is
either Some j' (the job currently mapped to cpu) or None (if cpu is idle). *)
Definition should_be_scheduled (j: JobIn arr_seq) (t: time) p :=
let '(cpu, mapped_job) := p in
if mapped_job is Some j' then (* If there is a job j', check the priority and affinity. *)
(can_execute_on alpha (job_task j) cpu) &&
~~ (higher_eq_priority t j' j)
else (* Else, if cpu is idle, check only the affinity. *)
(can_execute_on alpha (job_task j) cpu).
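(* As a toy, standalone illustration of this check (purely illustrative;
affinities are modeled as lists of cpus and priorities as plain nats with
"smaller number means higher priority", so j may preempt j' exactly when the
cpu is in j's affinity and j has strictly higher priority): *)
Definition toy_should_be_scheduled (affinity: seq nat) (prio: nat -> nat)
  (j: nat) (p: nat * option nat) : bool :=
  let '(cpu, mapped_job) := p in
  if mapped_job is Some j' then
    (cpu \in affinity) && (prio j < prio j')
  else (cpu \in affinity).
Example toy_preemption_check :
  toy_should_be_scheduled [:: 0; 2] id 1 (2, Some 4) = true.
Proof. reflexivity. Qed.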
(* Next, using the "should_be_scheduled" predicate, we define a function
that tries to schedule job j by updating a list of mappings.
It does so by replacing the first pair (cpu, x) where j can be
scheduled (if it exists). *)
Definition update_available_cpu t allocation j :=
replace_first (should_be_scheduled j t) (* search for processors that j can preempt *)
(set_pair_2nd (Some j)) (* replace the mapping in that processor with j *)
allocation. (* list of mappings *)
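(* The helper replace_first comes from rt.util.all. As a rough, standalone
sketch of the behavior described above (an illustrative guess at its
specification, not the actual rt.util definition), it applies f to the first
element satisfying P and leaves every other element untouched: *)
Fixpoint replace_first_sketch {T: Type} (P: T -> bool) (f: T -> T)
  (l: seq T) : seq T :=
  match l with
  | [::] => [::]
  | x :: xs => if P x then f x :: xs else x :: replace_first_sketch P f xs
  end.
(* Only the first idle cpu in the list receives the new job. *)
Example replace_first_sketch_example :
  replace_first_sketch (fun p : nat * option nat => p.2 == None)
    (fun p => (p.1, Some 7))
    [:: (0, Some 3); (1, None); (2, None)]
  = [:: (0, Some 3); (1, Some 7); (2, None)].
Proof. reflexivity. Qed.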
(* Using the function "update_available_cpu", we now define an iteration
that tries to successively schedule each job in a list job_list.
Starting with an empty mapping,
<(cpu0, None), (cpu1, None), (cpu2, None), ...>,
it tries to schedule each job in job_list and yields some updated list:
<(cpu0, None), (cpu1, Some j5), (cpu2, Some j9), ...>. *)
Definition try_to_schedule_every_job job_list t :=
foldl (update_available_cpu t)
(zip (enum (processor num_cpus)) (nseq num_cpus None)) (* empty mapping*)
job_list.
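(* Continuing the sketch above (still purely illustrative, reusing
replace_first_sketch; processors are simplified to plain nats and every job
may run on every cpu), the fold fills the cpus one by one: each job grabs the
first idle cpu, and jobs beyond the number of cpus are left unscheduled. *)
Definition toy_assign (alloc: seq (nat * option nat)) (j: nat) :=
  replace_first_sketch (fun p : nat * option nat => p.2 == None)
    (fun p => (p.1, Some j)) alloc.
Example toy_try_to_schedule_every_job :
  foldl toy_assign (zip [:: 0; 1; 2] (nseq 3 None)) [:: 5; 9]
  = [:: (0, Some 5); (1, Some 9); (2, None)].
Proof. reflexivity. Qed.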
(* The iteration we just defined is then applied to the list of pending jobs
at any time t. *)
Definition schedule_every_pending_job prev_sched t :=
try_to_schedule_every_job (sorted_pending_jobs prev_sched t) t.
(* The schedule can now be constructed iteratively. Starting from the empty schedule, ... *)
Definition empty_schedule : schedule num_cpus arr_seq :=
fun cpu t ⇒ None.
(* ..., we update the schedule at time t by calling schedule_every_pending_job with
the list of pending jobs at time t, and then converting the result to a function. *)
Definition update_schedule (prev_sched: schedule num_cpus arr_seq)
(t_next: time) : schedule num_cpus arr_seq :=
fun cpu t ⇒
if t == t_next then
pairs_to_function None (schedule_every_pending_job prev_sched t) cpu
else prev_sched cpu t.
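(* The conversion is done by pairs_to_function from rt.util.all. As a rough,
standalone sketch of the described behavior (an illustrative guess, not the
actual rt.util definition), it looks up the key in the list of pairs and
returns the associated value, falling back to the default when the key is
unmapped: *)
Definition pairs_to_function_sketch {K: eqType} {V: Type}
  (default: V) (pairs: seq (K * V)) (x: K) : V :=
  if [seq p <- pairs | p.1 == x] is p :: _ then p.2 else default.
Example pairs_to_function_sketch_example :
  pairs_to_function_sketch None [:: (0, Some 4); (1, None); (2, Some 8)] 2
  = Some 8.
Proof. reflexivity. Qed.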
(* This allows us to iteratively construct the schedule up to some time t_max. *)
Fixpoint schedule_prefix (t_max: time) : schedule num_cpus arr_seq :=
if t_max is t_prev.+1 then
(* At time t_prev + 1, schedule jobs that have not completed by time t_prev. *)
update_schedule (schedule_prefix t_prev) t_prev.+1
else
(* At time 0, schedule any jobs that arrive. *)
update_schedule empty_schedule 0.
(* Finally, the prefixes are used to build the complete schedule. *)
Definition scheduler (cpu: processor num_cpus) (t: time) := (schedule_prefix t) cpu t.
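(* To see the shape of this construction in isolation, here is a toy,
standalone analogue (purely illustrative, with plain nats instead of
schedules): each prefix fixes exactly one new time instant, defers all earlier
instants to the previous prefix, and the complete function reads instant t
from the prefix of length t. *)
Definition toy_update (prev: nat -> nat) (t_next v: nat) : nat -> nat :=
  fun t => if t == t_next then v else prev t.
(* Each prefix fixes the value at its last instant (here, twice the time). *)
Fixpoint toy_prefix (t_max: nat) : nat -> nat :=
  if t_max is t_prev.+1 then
    toy_update (toy_prefix t_prev) t_prev.+1 (t_prev.+1 * 2)
  else
    toy_update (fun _ => 0) 0 0.
Definition toy_complete (t: nat) : nat := toy_prefix t t.
Example toy_complete_at_3 : toy_complete 3 = 6.
Proof. reflexivity. Qed.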
End Implementation.
(* In this section, we prove several properties about the scheduling algorithm we
implemented. For example, we show that it is APA work conserving. *)
Section Proofs.
Context {Job: eqType}.
Context {sporadic_task: eqType}.
Variable job_cost: Job → time.
Variable job_task: Job → sporadic_task.
(* Assume a positive number of processors. *)
Variable num_cpus: nat.
Hypothesis H_at_least_one_cpu: num_cpus > 0.
(* Let alpha be an affinity associated with each task. *)
Variable alpha: task_affinity sporadic_task num_cpus.
(* Let arr_seq be any arrival sequence of jobs where ...*)
Variable arr_seq: arrival_sequence Job.
(* ...jobs have positive cost and...*)
Hypothesis H_job_cost_positive:
∀ (j: JobIn arr_seq), job_cost_positive job_cost j.
(* ... at any time, there are no duplicates of the same job. *)
Hypothesis H_arrival_sequence_is_a_set :
arrival_sequence_is_a_set arr_seq.
(* Consider any JLDP policy higher_eq_priority that is transitive and total. *)
Variable higher_eq_priority: JLDP_policy arr_seq.
Hypothesis H_priority_transitive: ∀ t, transitive (higher_eq_priority t).
Hypothesis H_priority_total: ∀ t, total (higher_eq_priority t).
(* Let sched denote our concrete scheduler implementation. *)
Let sched := scheduler job_cost job_task num_cpus arr_seq alpha higher_eq_priority.
(* Next, we provide some helper lemmas about the scheduler construction. *)
Section HelperLemmas.
(* Let sched_prefix denote the prefix construction of our scheduler. *)
Let sched_prefix := schedule_prefix job_cost job_task num_cpus arr_seq alpha higher_eq_priority.
(* We begin by showing that the scheduler preserves its prefixes. *)
Lemma scheduler_same_prefix :
∀ t t_max cpu,
t ≤ t_max →
sched_prefix t_max cpu t = sched cpu t.
(* To avoid many parameters, let's also rename the scheduling function.
We make a generic version (for any list of jobs l), ... *)
Let schedule_jobs l t := try_to_schedule_every_job job_task num_cpus arr_seq alpha higher_eq_priority l t.
(* ... and a specific version (for the pending jobs in sched). *)
Let schedule_pending_jobs t := schedule_jobs (sorted_pending_jobs job_cost num_cpus arr_seq higher_eq_priority sched t) t.
(* Next, we show that there are no duplicate cpus in the mapping. *)
Lemma scheduler_uniq_cpus :
∀ t l,
uniq (unzip1 (schedule_jobs l t)).
(* Next, we show that if a job j is in the mapping, then j must come from the list
of jobs l used in the construction. *)
Lemma scheduler_job_in_mapping :
∀ l j t cpu,
(cpu, Some j) ∈ schedule_jobs l t → j ∈ l.
(* Next, we prove that if a pair (cpu, Some j) is in the mapping, then
cpu must be part of j's affinity. *)
Lemma scheduler_mapping_respects_affinity :
∀ j t cpu,
(cpu, Some j) ∈ schedule_pending_jobs t →
can_execute_on alpha (job_task j) cpu.
(* Next, we show that the mapping does not schedule the same job j on two
different cpus. *)
Lemma scheduler_has_no_duplicate_jobs :
∀ j t cpu1 cpu2,
(cpu1, Some j) ∈ schedule_pending_jobs t →
(cpu2, Some j) ∈ schedule_pending_jobs t →
cpu1 = cpu2.
(* Based on the definition of the schedule, a job j is scheduled on cpu
iff (cpu, Some j) is in the final mapping. *)
Lemma scheduler_scheduled_on :
∀ j cpu t,
scheduled_on sched j cpu t = ((cpu, Some j) ∈ schedule_pending_jobs t).
(* Now we show that for every cpu, there is always a pair in the mapping. *)
Lemma scheduler_has_cpus :
∀ cpu t l,
∃ x,
(cpu, x) ∈ schedule_jobs l t.
(* Next, consider a list of jobs l that is sorted by priority and does not have
duplicates.
We prove that for any job j in l, if j is not scheduled at time t,
then every cpu in j's affinity has some job mapped at time t. *)
Lemma scheduler_mapping_is_work_conserving :
∀ j cpu t l,
j ∈ l →
sorted (higher_eq_priority t) l →
uniq l →
(∀ cpu, (cpu, Some j) ∉ schedule_jobs l t) →
can_execute_on alpha (job_task j) cpu →
∃ j_other,
(cpu, Some j_other) ∈ schedule_jobs l t.
(* Next, we prove that the mapping enforces priority. *)
Lemma scheduler_priority :
∀ j j_hp cpu t,
backlogged job_cost sched j t →
can_execute_on alpha (job_task j) cpu →
scheduled_on sched j_hp cpu t →
higher_eq_priority t j_hp j.
End HelperLemmas.
(* Now, we prove the important properties about the implementation. *)
(* Jobs do not execute before they arrive, ...*)
Theorem scheduler_jobs_must_arrive_to_execute:
jobs_must_arrive_to_execute sched.
(* ..., jobs are sequential, ... *)
Theorem scheduler_sequential_jobs: sequential_jobs sched.
(* ... and jobs do not execute after completion. *)
Theorem scheduler_completed_jobs_dont_execute:
completed_jobs_dont_execute job_cost sched.
(* In addition, the scheduler is APA work conserving, ... *)
Theorem scheduler_apa_work_conserving:
apa_work_conserving job_cost job_task sched alpha.
(* ..., respects affinities, ... *)
Theorem scheduler_respects_affinity:
respects_affinity job_task sched alpha.
(* ... and enforces the JLDP policy under weak APA scheduling. *)
Theorem scheduler_enforces_policy :
enforces_JLDP_policy_under_weak_APA job_cost job_task sched alpha higher_eq_priority.
End Proofs.
End ConcreteScheduler.