Library rt.implementation.basic.schedule
Require Import rt.util.all.
Require Import rt.model.basic.job rt.model.basic.arrival_sequence rt.model.basic.schedule
rt.model.basic.platform rt.model.basic.priority.
Module ConcreteScheduler.
Import Job ArrivalSequence Schedule Platform Priority.
Section Implementation.
Context {Job: eqType}.
Variable job_cost: Job → time.
(* Let num_cpus denote the number of processors, ...*)
Variable num_cpus: nat.
(* ... and let arr_seq be any arrival sequence.*)
Variable arr_seq: arrival_sequence Job.
(* Assume a JLDP policy is given. *)
Variable higher_eq_priority: JLDP_policy arr_seq.
(* Consider the list of pending jobs at time t. *)
Definition jobs_pending_at (sched: schedule num_cpus arr_seq) (t: time) :=
[seq j <- jobs_arrived_up_to arr_seq t | pending job_cost sched j t].
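(* As a sanity check, the following sketch (not part of the original file) makes the
membership condition explicit: a job is in this list exactly when it is pending at t
and has arrived by t. The statement is an instance of ssreflect's mem_filter; its
proof is omitted here, in the style of the lemmas further below. *)
Remark jobs_pending_at_mem :
∀ (sched: schedule num_cpus arr_seq) (t: time) (j: JobIn arr_seq),
(j \in jobs_pending_at sched t) =
pending job_cost sched j t && (j \in jobs_arrived_up_to arr_seq t).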
(* Next, we sort this list by priority. *)
Definition sorted_pending_jobs (sched: schedule num_cpus arr_seq) (t: time) :=
sort (higher_eq_priority t) (jobs_pending_at sched t).
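(* Similarly, as a sketch: sorting only reorders the pending jobs, so the sorted list
is a permutation of jobs_pending_at (this follows from ssreflect's perm_sort;
proof omitted). *)
Remark sorted_pending_jobs_perm :
∀ (sched: schedule num_cpus arr_seq) (t: time),
perm_eq (sorted_pending_jobs sched t) (jobs_pending_at sched t).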
(* Starting from the empty schedule as a base, ... *)
Definition empty_schedule : schedule num_cpus arr_seq :=
fun cpu t ⇒ None.
(* ..., we update the mapping of jobs to processors at time t_next as follows:
the i-th job in the sorted list is assigned to the i-th cpu, and a cpu is left
idle (None) if the list is too short. *)
Definition update_schedule (prev_sched: schedule num_cpus arr_seq)
(t_next: time) : schedule num_cpus arr_seq :=
fun cpu t ⇒
if t == t_next then
nth_or_none (sorted_pending_jobs prev_sched t) cpu
else prev_sched cpu t.
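(* To make the case split explicit, here is a sketch of the two defining equations
(proofs omitted): at time t_next the updated mapping is consulted, while at any
other time the previous schedule is returned unchanged. *)
Remark update_schedule_hit :
∀ (prev_sched: schedule num_cpus arr_seq) (t_next: time) (cpu: processor num_cpus),
update_schedule prev_sched t_next cpu t_next =
nth_or_none (sorted_pending_jobs prev_sched t_next) cpu.
Remark update_schedule_miss :
∀ (prev_sched: schedule num_cpus arr_seq) (t_next t: time) (cpu: processor num_cpus),
t != t_next →
update_schedule prev_sched t_next cpu t = prev_sched cpu t.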
(* The schedule is iteratively constructed by applying update_schedule at every time t, ... *)
Fixpoint schedule_prefix (t_max: time) : schedule num_cpus arr_seq :=
if t_max is t_prev.+1 then
(* At time t_prev + 1, schedule jobs that have not completed by time t_prev. *)
update_schedule (schedule_prefix t_prev) t_prev.+1
else
(* At time 0, schedule any jobs that arrive. *)
update_schedule empty_schedule 0.
Definition scheduler (cpu: processor num_cpus) (t: time) := (schedule_prefix t) cpu t.
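(* As a usage sketch (proof again omitted): unfolding schedule_prefix at a successor
time shows that the scheduler picks jobs from the pending list sorted with respect
to the prefix built so far; the analogous equation holds at time 0 with
empty_schedule in place of the prefix. *)
Remark scheduler_unfolds :
∀ (cpu: processor num_cpus) (t: time),
scheduler cpu t.+1 =
nth_or_none (sorted_pending_jobs (schedule_prefix t) t.+1) cpu.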
End Implementation.
Section Proofs.
Context {Job: eqType}.
Variable job_cost: Job → time.
(* Assume a positive number of processors. *)
Variable num_cpus: nat.
Hypothesis H_at_least_one_cpu: num_cpus > 0.
(* Let arr_seq be any arrival sequence of jobs where ...*)
Variable arr_seq: arrival_sequence Job.
(* ...jobs have positive cost and...*)
Hypothesis H_job_cost_positive:
∀ (j: JobIn arr_seq), job_cost_positive job_cost j.
(* ... at any time, there are no duplicates of the same job. *)
Hypothesis H_arrival_sequence_is_a_set :
arrival_sequence_is_a_set arr_seq.
(* Consider any JLDP policy higher_eq_priority that is transitive and total. *)
Variable higher_eq_priority: JLDP_policy arr_seq.
Hypothesis H_priority_transitive: ∀ t, transitive (higher_eq_priority t).
Hypothesis H_priority_total: ∀ t, total (higher_eq_priority t).
(* Let sched denote our concrete scheduler implementation. *)
Let sched := scheduler job_cost num_cpus arr_seq higher_eq_priority.
(* Next, we provide some helper lemmas about the scheduler construction. *)
Section HelperLemmas.
(* Let's use a shorter name for the schedule prefix function. *)
Let sched_prefix := schedule_prefix job_cost num_cpus arr_seq higher_eq_priority.
(* First, we show that the scheduler preserves its prefixes. *)
Lemma scheduler_same_prefix :
∀ t t_max cpu,
t ≤ t_max →
sched_prefix t_max cpu t = sched cpu t.
(* With respect to the sorted list of pending jobs, ...*)
Let sorted_jobs (t: time) :=
sorted_pending_jobs job_cost num_cpus arr_seq higher_eq_priority sched t.
(* ..., we show that a job is mapped to a processor based on that list, ... *)
Lemma scheduler_nth_or_none_mapping :
∀ t cpu x,
sched cpu t = x →
nth_or_none (sorted_jobs t) cpu = x.
(* ..., a scheduled job is mapped to a cpu corresponding to its position, ... *)
Lemma scheduler_nth_or_none_scheduled :
∀ j t,
scheduled sched j t →
∃ (cpu: processor num_cpus),
nth_or_none (sorted_jobs t) cpu = Some j.
(* ..., and that a backlogged job has a position larger than or equal to the number
of processors. *)
Lemma scheduler_nth_or_none_backlogged :
∀ j t,
backlogged job_cost sched j t →
∃ i,
nth_or_none (sorted_jobs t) i = Some j ∧ i ≥ num_cpus.
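(* Proof idea (sketch): a backlogged job is pending, so it occurs at some position i
in the sorted list; if i were a valid processor index (i < num_cpus), the mapping
lemma above would force sched i t = Some j, contradicting the fact that j is not
scheduled. Hence i ≥ num_cpus. *)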
End HelperLemmas.
(* Now, we prove the main properties of the implementation. *)
(* Jobs do not execute before they arrive, ...*)
Theorem scheduler_jobs_must_arrive_to_execute:
jobs_must_arrive_to_execute sched.
(* ..., jobs are sequential, ... *)
Theorem scheduler_sequential_jobs: sequential_jobs sched.
(* ... and jobs do not execute after completion. *)
Theorem scheduler_completed_jobs_dont_execute:
completed_jobs_dont_execute job_cost sched.
(* In addition, the scheduler is work conserving ... *)
Theorem scheduler_work_conserving:
work_conserving job_cost sched.
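(* Intuition (sketch): suppose some processor cpu were idle while job j is backlogged
at time t. By scheduler_nth_or_none_mapping, the sorted pending list yields None at
position cpu, so its length is at most cpu < num_cpus; but
scheduler_nth_or_none_backlogged places j at a position of at least num_cpus, a
contradiction. *)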
(* ... and enforces the JLDP policy. *)
Theorem scheduler_enforces_policy :
enforces_JLDP_policy job_cost sched higher_eq_priority.
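(* Intuition (sketch): if j is backlogged and some job j_hp is scheduled at time t,
then by scheduler_nth_or_none_scheduled j_hp occupies a position below num_cpus in
the sorted pending list, while by scheduler_nth_or_none_backlogged j occupies a
position of at least num_cpus. Since the list is sorted by the transitive and total
relation higher_eq_priority t, the earlier job j_hp has priority at least that of j. *)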
End Proofs.
End ConcreteScheduler.