(*include "Clight/Cexec.ma".*) include "Clight/MemProperties.ma". (*include "Clight/frontend_misc.ma".*)

(* This file contains some definitions and lemmas related to memory injections.
 * The whole development is quite similar to the one described in the article by
 * Leroy & Blazy in J.A.R., except that in Cerco, we describe data down to the byte
 * level (similar to the memory model of CompCert v2 but not to the aforementioned
 * paper). *)

(* --------------------------------------------------------------------------- *)
(* Aux lemmas that are likely to be found elsewhere. *)
(* --------------------------------------------------------------------------- *)

(* Boolean strict order on Z is preserved when the upper bound is replaced by
 * its successor. *)
lemma zlt_succ : ∀a,b. Zltb a b = true → Zltb a (Zsucc b) = true.
#a #b #HA lapply (Zltb_true_to_Zlt … HA) #HA_prop @Zlt_to_Zltb_true /2/ qed.

(* Every integer is strictly below its successor (boolean form). *)
lemma zlt_succ_refl : ∀a. Zltb a (Zsucc a) = true.
#a @Zlt_to_Zltb_true /2/ qed.

(* definition le_offset : offset → offset → bool. λx,y. Zleb (offv x) (offv y). *)

(* No term is different from itself. *)
lemma not_refl_absurd : ∀A:Type[0].∀x:A. x ≠ x → False. /2/. qed.

(* Boolean equality on Z is reflexive. *)
lemma eqZb_reflexive : ∀x:Z. eqZb x x = true. #x /2/. qed.

(* Boolean equality on Z is symmetric. *)
lemma eqZb_symmetric : ∀x,y:Z. eqZb x y = eqZb y x.
#x #y @(eqZb_elim … x y) [ * >eqZb_z_z @refl | #Hneq >(eqZb_false … (sym_neq ??? Hneq)) @refl ] qed.

(* TODO: move in frontend_misc. This axiom states that for small bitvectors, bitvector addition
 * commutes with conversion to Z, i.e. there is no overflow. *)
axiom Z_addition_bv_commute : ∀n. ∀v1,v2:BitVector n. (Z_of_unsigned_bitvector ? v1) + (Z_of_unsigned_bitvector ? v2) < Z_two_power_nat n → Z_of_unsigned_bitvector ? (addition_n ? v1 v2) = (Z_of_unsigned_bitvector ? v1) + (Z_of_unsigned_bitvector ? v2).

(* TODO: move in frontend_misc *)
(* Strict inequality on Z entails disequality. *)
lemma Zlt_to_Zneq : ∀x,y:Z. x < y → x ≠ y.
#x #y /3 by absurd, nmk/ qed.

(* Strict order on Z is invariant under translation by [delta]. *)
lemma Zlt_translate : ∀x,y,delta:Z. x < y → x + delta < y + delta.
#x #y #delta /4 by monotonic_Zle_Zplus_r, Zle_to_Zlt, Zlt_to_Zle/ qed.

(* A strict upper bound may be weakened by any non-negative shift. *)
lemma Zlt_weaken : ∀x,y,delta:Z. delta ≥ 0 → x < y → x < y + delta.
#x #y #delta cases delta try // #p [ 2: #Habsurd @(False_ind … Habsurd) ] /3 by Zlt_to_Zle_to_Zlt, Zplus_le_pos/ qed.

(* Replacing the first summand of a strict bound by a smaller value keeps the bound. *)
lemma Zlt_sum_weaken : ∀a,a',b,c:Z. a+b < c → a' < a → a'+b < c.
#a #a' #b #c /3 by transitive_Zlt, Zlt_translate/ qed.

(* TODO: move to Z.ma *)
(* [Zsucc] is monotonic w.r.t. the strict order on Z. *)
lemma monotonic_Zlt_Zsucc : monotonic Z Zlt Zsucc.
#x #y cases x cases y /2/ [ #n /3 by Zle_to_Zlt_to_Zlt, monotonic_Zle_Zplus_r/ ] #n #m @(pos_cases … n) @(pos_cases … m) // [ #n /3 by I, Zle_to_Zlt, monotonic_Zle_Zplus_r/ | #m #n /3 by I, Zle_to_Zlt, monotonic_Zle_Zplus_r/ | /2 by Zlt_translate/ | /3 by Zle_to_Zlt, monotonic_Zle_Zplus_r/ | #n' /3 by Zle_to_Zlt_to_Zlt, lt_to_Zlt_neg_neg, Zlt_to_Zle/ | #n' #m' /3 by lt_plus_to_lt_r, lt_to_Zlt_neg_neg/ ] qed.

(* [Zpred] is monotonic w.r.t. the strict order on Z. *)
lemma monotonic_Zlt_Zpred : monotonic Z Zlt Zpred.
#x #y cases x cases y /2/ [ #n /3 by Zle_to_Zlt, monotonic_Zle_Zpred/ | #m #n @(pos_cases … n) @(pos_cases … m) [ /3 by monotonic_Zlt_Zsucc/ | /3 by refl, true_to_andb_true, Zltb_true_to_Zlt/ | #n' cases n' normalize [ #H inversion H [ #H' destruct | #m' #_ #_ #Habsurd @(absurd … Habsurd (not_eq_one_succ m')) ] | #p /3 by le_to_lt_to_lt, le_S_contradiction_1/ | #p /3 by le_to_lt_to_lt, le_S_contradiction_1/ ] | #m' #n' /3 by lt_plus_to_lt_r, lt_to_Zlt_pos_pos/ ] | #m #n /3 by Zplus_le_pos, Zle_to_Zlt/ ] qed.

(* Adding a fixed integer [n] on the right is monotonic w.r.t. the strict order. *)
lemma monotonic_Zlt_Zplus_l: ∀n.monotonic Z Zlt (λm.m + n).
#n cases n // #n' #a #b #H [ @(pos_elim … n') [ >sym_Zplus >(sym_Zplus b) <(Zsucc_Zplus_pos_O a) <(Zsucc_Zplus_pos_O b) @monotonic_Zlt_Zsucc @H | #n'' #H >(?:pos (succ n'') = Zsucc (pos n'')) // >(Zplus_Zsucc …) >(Zplus_Zsucc …) // cases a in H; cases b /2/ #p #H /3 by Zlt_to_Zle_to_Zlt, monotonic_Zle_Zsucc, monotonic_Zlt_Zsucc/ ] | @(pos_elim … n') [ >sym_Zplus >(sym_Zplus b) <(Zpred_Zplus_neg_O a) <(Zpred_Zplus_neg_O b) /2/; | #n'' #H >(?:neg (succ n'') = Zpred (neg n'')) // /3 by Zle_to_Zlt_to_Zlt, monotonic_Zlt_Zpred/ ] ] qed.

(* Strict order entails large order on Z (primed variant of a library lemma). *)
lemma Zlt_to_Zle' : ∀x,y:Z. x < y → x ≤ y.
#x #y /3 by Zle_to_Zlt_to_Zlt, Zlt_to_Zle/ qed.
(* --------------------------------------------------------------------------- *)
(* Some shorthands for conversion functions from BitVectorZ. *)
(* --------------------------------------------------------------------------- *)

(* Offsets are just bitvectors packed inside some annoying constructor. *)
(* [Zoo o] : the Z integer denoted by the offset's underlying unsigned bitvector. *)
definition Zoo ≝ λx. Z_of_unsigned_bitvector ? (offv x).
(* [boo z] : repackage a Z integer as an offset (truncating via bitvector_of_Z). *)
definition boo ≝ λx. mk_offset (bitvector_of_Z ? x).

(* --------------------------------------------------------------------------- *)
(* In the definition of injections below, we maintain a list of blocks that are
   writeable. We need some lemmas to reason on the fact that a block cannot be
   both writeable and not writeable, etc. *)
(* --------------------------------------------------------------------------- *)

(* When equality on A is decidable, [mem A elt l] is too. *)
lemma mem_dec : ∀A:Type[0]. ∀eq:(∀a,b:A. a = b ∨ a ≠ b). ∀elt,l. mem A elt l ∨ ¬ (mem A elt l).
#A #dec #elt #l elim l [ 1: normalize %2 /2/ | 2: #hd #tl #Hind elim (dec elt hd) [ 1: #Heq >Heq %1 /2/ | 2: #Hneq cases Hind [ 1: #Hmem %1 /2/ | 2: #Hnmem %2 normalize % #H cases H [ 1: lapply Hneq * #Hl #Eq @(Hl Eq) | 2: lapply Hnmem * #Hr #Hmem @(Hr Hmem) ] ] ] ] qed.

(* Equality of blocks is decidable (propositional disjunction version). *)
lemma block_eq_dec : ∀a,b:block. a = b ∨ a ≠ b.
* #a * #b cases (decidable_eq_Z … a b) [ * %1 @refl | #Hneq %2 % #H destruct @(absurd … (refl ??) Hneq) ] qed.

(* Equality of blocks is decidable (informative sum version). *)
lemma block_decidable_eq : ∀a,b:block. (a = b) + (a ≠ b).
* #a * #b cases (decidable_eq_Z_Type … a b) [ * %1 @refl | #Hneq %2 % #H destruct @(absurd … (refl ??) Hneq) ] qed.

(* A member of a block list is distinct from any block not in that list. *)
lemma mem_not_mem_neq : ∀l,elt1,elt2. (mem block elt1 l) → ¬ (mem block elt2 l) → elt1 ≠ elt2. #l #elt1 #elt2 elim l
[ 1: normalize #Habsurd @(False_ind … Habsurd)
| 2: #hd #tl #Hind normalize #Hl #Hr cases Hl [ 1: #Heq >Heq @(eq_block_elim … hd elt2) [ 1: #Heq >Heq /2 by not_to_not/ | 2: #x @x ] | 2: #Hmem1 cases (mem_dec ? block_eq_dec ? tl) [ 1: #Hmem2 % #Helt_eq cases Hr #H @H %2 @Hmem2 | 2: #Hmem2 @Hind // ] ] ] qed.
(* Propositional block disequality entails boolean [eq_block] being false. *)
lemma neq_block_eq_block_false : ∀b1,b2:block. b1 ≠ b2 → eq_block b1 b2 = false.
#b1 #b2 * #Hb @(eq_block_elim … b1 b2) [ 1: #Habsurd @(False_ind … (Hb Habsurd)) | 2: // ] qed.

(* --------------------------------------------------------------------------- *)
(* Lemmas related to freeing memory and pointer validity. *)
(* --------------------------------------------------------------------------- *)

(* A pointer into a block distinct from [b] stays valid after freeing [b]. *)
lemma valid_pointer_ok_free : ∀b : block. ∀m,ptr. valid_pointer m ptr = true → pblock ptr ≠ b → valid_pointer (free m b) ptr = true.
* (* #br *) #bid * #contents #next #posnext * * (* #pbr *) #pbid #poff normalize @(eqZb_elim pbid bid) [ 1: #Heqid >Heqid (* cases pbr cases br normalize *) cases (Zltb bid next) normalize nodelta [ 2: #Habsurd destruct (Habsurd) ] #_ #H @False_ind @(absurd … (refl ??) H) | 2: #Hneqid (* cases pbr cases br normalize *) try // ] qed.

(* Freeing a non-empty list of blocks unfolds to freeing the head after the tail. *)
lemma free_list_cons : ∀m,b,tl. free_list m (b :: tl) = (free (free_list m tl) b).
#m #b #tl whd in match (free_list ??) in ⊢ (??%%); whd in match (free ??); @refl qed.

(* lemma valid_pointer_free_ok : ∀b : block. ∀m,ptr. valid_pointer (free m b) ptr = true → pblock ptr ≠ b → (* This hypothesis is necessary to handle the case of "valid" pointers to empty *) valid_pointer m ptr = true. * #br #bid * #contents #next #posnext * * #pbr #pbid #poff @(eqZb_elim pbid bid) [ 1: #Heqid >Heqid cases pbr cases br normalize [ 1,4: #_ * #Habsurd @(False_ind … (Habsurd (refl ??))) ] cases (Zltb bid next) normalize nodelta [ 2,4: #Habsurd destruct (Habsurd) ] // | 2: #Hneqid cases pbr cases br normalize try // >(eqZb_false … Hneqid) normalize nodelta try // qed. lemma valid_pointer_free_list_ok : ∀blocks : list block. ∀m,ptr. valid_pointer (free_list m blocks) ptr = true → ¬ mem ? (pblock ptr) blocks → valid_pointer m ptr = true. #blocks elim blocks [ 1: #m #ptr normalize #H #_ @H | 2: #b #tl #Hind #m #ptr #Hvalid whd in match (mem block (pblock ptr) ?); >free_list_cons in Hvalid; #Hvalid * #Hguard lapply (valid_pointer_free_ok … Hvalid) #H @Hind [ 2: % #Heq @Hguard %2 @Heq | 1: @H % #Heq @Hguard %1 @Heq ] ] qed. *)

(* An alternative version without the guard on the equality of blocks. *)
(* lemma valid_pointer_free_ok_alt : ∀b : block. ∀m,ptr. valid_pointer (free m b) ptr = true → (pblock ptr) = b ∨ (pblock ptr ≠ b ∧ valid_pointer m ptr = true). * #br #bid * #contents #next #posnext * * #pbr #pbid #poff @(eq_block_elim … (mk_block br bid) (mk_block pbr pbid)) [ 1: #Heq #_ %1 @(sym_eq … Heq) | 2: cases pbr cases br normalize nodelta [ 1,4: @(eqZb_elim pbid bid) [ 1,3: #Heq >Heq * #H @(False_ind … (H (refl ??))) | 2,4: #Hneq #_ normalize >(eqZb_false … Hneq) normalize nodelta #H %2 @conj try assumption % #Habsurd destruct (Habsurd) elim Hneq #Hneq @Hneq try @refl ] | *: #_ #H %2 @conj try @H % #Habsurd destruct (Habsurd) ] ] qed. *)

(* Well in fact a valid pointer can be valid even after a free. Ah. *)
(* lemma pointer_in_free_list_not_valid : ∀blocks,m,ptr. mem ? (pblock ptr) blocks → valid_pointer (free_list m blocks) ptr = false. *)
(* #blocks elim blocks [ 1: #m #ptr normalize #Habsurd @(False_ind … Habsurd) | 2: #b #tl #Hind #m #ptr whd in match (mem block (pblock ptr) ?); >free_list_cons * [ 1: #Hptr_match whd in match (free ??); whd in match (update_block ????); whd in match (valid_pointer ??); whd in match (low_bound ??); whd in match (high_bound ??); >Hptr_match >eq_block_identity normalize nodelta whd in match (low ?); cut (Zleb OZ (Z_of_unsigned_bitvector offset_size (offv (poff ptr))) = true) [ 1: elim (offv (poff ptr)) // #n' #hd #tl cases hd normalize [ 1: #_ try @refl | 2: #H @H ] ] #H >H cut (Zleb (Z_of_unsigned_bitvector offset_size (offv (poff ptr))) OZ = false) [ 1: elim (offv (poff ptr)) normalize #n' #hd #tl cases hd normalize [ 1: normalize #_ try @refl | 2: #H @H ] ] valid_pointer (free_list m blocks) ptr = false. *)

(* --------------------------------------------------------------------------- *)
(* Memory injections *)
(* --------------------------------------------------------------------------- *)

(* An embedding is a function from block ids to (blocks × offset). *)
definition embedding ≝ block → option (block × offset).

(* Addition of offsets, i.e. addition of the underlying bitvectors. *)
definition offset_plus : offset → offset → offset ≝ λo1,o2. mk_offset (addition_n ? (offv o1) (offv o2)).

(* The zero offset is neutral for [offset_plus]. *)
lemma offset_plus_0 : ∀o. offset_plus (mk_offset (zero ?)) o = o.
* #o whd in match (offset_plus ??); >commutative_addition_n >addition_n_0 @refl qed.

(* Translates a pointer through an embedding: the block is remapped and the
 * embedding's offset is added to the pointer's offset. *)
definition pointer_translation : ∀p:pointer. ∀E:embedding. option pointer ≝ λp,E. match p with [ mk_pointer pblock poff ⇒ match E pblock with [ None ⇒ None ? | Some loc ⇒ let 〈dest_block,dest_off〉 ≝ loc in let ptr ≝ (mk_pointer dest_block (offset_plus poff dest_off)) in Some ? ptr ] ].

(* Definition of embedding compatibility: [E'] refines [E] — wherever [E] is
 * defined, [E'] agrees with it. *)
definition embedding_compatible : embedding → embedding → Prop ≝ λE,E'. ∀b:block. E b = None ? ∨ E b = E' b.

(* We parameterise the "equivalence" relation on values with an embedding. *)
(* Front-end values. Pointers are related up to translation through [E];
 * all other values are related only to themselves. *)
inductive value_eq (E : embedding) : val → val → Prop ≝ | undef_eq : value_eq E Vundef Vundef | vint_eq : ∀sz,i. value_eq E (Vint sz i) (Vint sz i) | vnull_eq : value_eq E Vnull Vnull | vptr_eq : ∀p1,p2. pointer_translation p1 E = Some ? p2 → value_eq E (Vptr p1) (Vptr p2).

(* [load_sim] states the fact that each successful load in [m1] is matched by a load in [m2] s.t.
 * the values are equivalent. *)
definition load_sim ≝ λ(E : embedding).λ(m1 : mem).λ(m2 : mem). ∀b1,off1,b2,off2,ty,v1. valid_block m1 b1 → (* We need this because it is not provable from [load_value_of_type ty m1 b1 off1] when loading by-ref *) E b1 = Some ? 〈b2,off2〉 → load_value_of_type ty m1 b1 off1 = Some ? v1 → (∃v2. load_value_of_type ty m2 b2 (offset_plus off1 off2) = Some ? v2 ∧ value_eq E v1 v2).

(* Pointer-based variant of [load_sim]: the source location is required to be a
 * valid pointer and is translated through [E] as a whole. *)
definition load_sim_ptr ≝ λ(E : embedding).λ(m1 : mem).λ(m2 : mem). ∀b1,off1,b2,off2,ty,v1. valid_pointer m1 (mk_pointer b1 off1) = true → (* We need this because it is not provable from [load_value_of_type ty m1 b1 off1] when loading by-ref *) pointer_translation (mk_pointer b1 off1) E = Some ? (mk_pointer b2 off2) → load_value_of_type ty m1 b1 off1 = Some ? v1 → (∃v2. load_value_of_type ty m2 b2 off2 = Some ? v2 ∧ value_eq E v1 v2).

(* A block is empty when its high bound does not exceed its low bound. *)
definition block_is_empty : mem → block → Prop ≝ λm,b. high_bound m b ≤ low_bound m b.

(* Definition of a 16 bit bitvector-implementable Z *)
definition z_implementable_bv : Z → Prop ≝ λz:Z. 0 ≤ z ∧ z < Z_two_power_nat 16.

(* Characterizing implementable blocks: both bounds fit in 16 bits. *)
definition block_implementable_bv ≝ λm:mem.λb:block. z_implementable_bv (low_bound m b) ∧ z_implementable_bv (high_bound m b). (* FIXME this is slightly restrictive. We could allow the high bound to be 2^16.*)

(* Characterizing blocks compatible with an embedding. This condition ensures
 * that we do not stray out of memory. *)
(* definition block_ok_with_embedding ≝ λE:embedding.λb,m. ∀b',delta. E b = Some ? 〈b', delta〉 → (high_bound m b) + (Zoo delta) < Z_two_power_nat 16. *)

(* We have to prove that the blocks we allocate are compatible with a given embedding.
   This amounts to proving that the embedding cannot shift the new block outside the 2^16
   bound. Notice that the following def. only constrains blocks mapped through the embedding. *)
definition block_in_bounds_if_mapped : embedding → block → mem → mem → Prop ≝ λE.λb.λm,m'. match E b with [ None ⇒ True | Some loc ⇒ let 〈b', delta〉 ≝ loc in block_implementable_bv m b ∧ block_implementable_bv m' b' ∧ (high_bound m b) + (Zoo delta) < Z_two_power_nat 16 ].

(* Adapted from Compcert's Memory: two distinct source blocks mapped to the same
 * target block must either land in disjoint ranges or one of them is empty. *)
definition non_aliasing : embedding → mem → Prop ≝ λE,m. ∀b1,b2,b1',b2',delta1,delta2. b1 ≠ b2 → (* note that two blocks can be different only because of this region *) E b1 = Some ? 〈b1',delta1〉 → E b2 = Some ? 〈b2',delta2〉 → (if eq_block b1' b2' then (* block_is_empty m b1' ∨ *) high_bound m b1 + (Zoo delta1) ≤ low_bound m b2 + (Zoo delta2) ∨ high_bound m b2 + (Zoo delta2) ≤ low_bound m b1 + (Zoo delta1) ∨ block_is_empty m b1 ∨ block_is_empty m b2 else True).

(* Adapted from Compcert's "Memory" *)
(* Definition of a memory injection *)
record memory_inj (E : embedding) (m1 : mem) (m2 : mem) : Prop ≝ { (* Load simulation *) mi_inj : load_sim_ptr E m1 m2; (* Invalid blocks are not mapped *) mi_freeblock : ∀b. ¬ (valid_block m1 b) → E b = None ?; (* Valid pointers are mapped to valid pointers *) mi_valid_pointers : ∀p,p'. valid_pointer m1 p = true → pointer_translation p E = Some ? p' → valid_pointer m2 p' = true; (* Blocks in the codomain are valid *) mi_nodangling : ∀b,b',o'. E b = Some ? 〈b',o'〉 → valid_block m2 b'; (* Regions are preserved *) mi_region : ∀b,b',o'. E b = Some ? 〈b',o'〉 → block_region b = block_region b'; (* sub-blocks do not overlap (non-aliasing property) *) mi_nonalias : non_aliasing E m1; (* mapped blocks are bitvector-implementable *) (* mi_implementable : ∀b,b',o'. E b = Some ? 〈b',o'〉 → block_implementable_bv m1 b ∧ block_implementable_bv m2 b'; *) (* The offset produced by the embedding shifts data within the 2^16 bound ... * We need this to rule out the following case: consider a huge block taking up * all the space [0; 2^16[. We can come up with an embedding that keeps the same block * but which shifts the data by some value, effectively rotating all the data around the * 2^16 offset limit. Valid pointers stay valid, the block is implementable ... But we * obviously can't prove that the semantics of pointer comparisons is preserved. *) mi_nowrap : ∀b. block_in_bounds_if_mapped E b m1 m2 }.

(* ---------------------------------------------------------------------------- *)
(* Setting up an empty injection: the everywhere-undefined embedding is an
 * injection between two empty memories. *)
(* ---------------------------------------------------------------------------- *)
definition empty_injection : memory_inj (λx. None ?) empty empty.
% [ whd #b1 #off1 #b2 #off2 #ty #v1 #Hvalid #Hptr_trans #Hload normalize in Hptr_trans; destruct | #b #H @refl | * #b #o #p' #_ normalize #Habsurd destruct | #b #b' #off #Habsurd destruct | #b #b' #o' #Habsurd destruct | whd #H1 #H2 #H3 #H4 #H5 #H6 #H7 #H8 #H9 destruct | #b whd @I ] qed.

(* ---------------------------------------------------------------------------- *)
(* End of the definitions on memory injections. Now, on to proving some lemmas. *)
(* ---------------------------------------------------------------------------- *)

(**** Lemmas on value_eq. *)

(* Particular inversions. *)
(* A value related to a pointer is itself a pointer, obtained by translation. *)
lemma value_eq_ptr_inversion : ∀E,p1,v. value_eq E (Vptr p1) v → ∃p2. v = Vptr p2 ∧ pointer_translation p1 E = Some ? p2.
#E #p1 #v #Heq inversion Heq [ 1: #Habsurd destruct (Habsurd) | 2: #sz #i #Habsurd destruct (Habsurd) | 3: #Habsurd destruct (Habsurd) | 4: #p1' #p2 #Heq #Heqv #Heqv2 #_ destruct %{p2} @conj try @refl try assumption ] qed.

(* A value related to an integer on the left is that same integer. *)
lemma value_eq_int_inversion : ∀E,sz,i,v. value_eq E (Vint sz i) v → v = (Vint sz i).
#E #sz #i #v #Heq inversion Heq [ 1: #Habsurd destruct (Habsurd) | 2: #sz #i #Heq #Heq' #_ @refl | 3: #Habsurd destruct (Habsurd) | 4: #p1 #p2 #Heq #Heqv #Heqv2 #_ destruct ] qed.

(* A value related to an integer on the right is that same integer. *)
lemma value_eq_int_inversion' : ∀E,sz,i,v. value_eq E v (Vint sz i) → v = (Vint sz i).
#E #sz #i #v #Heq inversion Heq [ 1: #_ #Habsurd destruct (Habsurd) | 2: #sz #i #Heq #Heq' #_ @refl | 3: #_ #Habsurd destruct (Habsurd) | 4: #p1 #p2 #Heq #Heqv #Heqv2 #_ destruct ] qed.

(* A value related to Vnull on the left is Vnull. *)
lemma value_eq_null_inversion : ∀E,v. value_eq E Vnull v → v = Vnull.
#E #v #Heq inversion Heq // #p1 #p2 #Htranslate #Habsurd lapply (jmeq_to_eq ??? Habsurd) #Habsurd' destruct qed.

(* A value related to Vnull on the right is Vnull. *)
lemma value_eq_null_inversion' : ∀E,v. value_eq E v Vnull → v = Vnull.
#E #v #Heq inversion Heq // #p1 #p2 #Htranslate #_ #Habsurd lapply (jmeq_to_eq ??? Habsurd) #Habsurd' destruct qed.

(* A cleaner version of inversion for [value_eq] *)
lemma value_eq_inversion : ∀E,v1,v2. ∀P : val → val → Prop. value_eq E v1 v2 → (P Vundef Vundef) → (∀sz,i. P (Vint sz i) (Vint sz i)) → (P Vnull Vnull) → (∀p1,p2. pointer_translation p1 E = Some ? p2 → P (Vptr p1) (Vptr p2)) → P v1 v2.
#E #v1 #v2 #P #Heq #H1 #H2 #H3 #H4 inversion Heq [ 1: #Hv1 #Hv2 #_ destruct @H1 | 2: #sz #i #Hv1 #Hv2 #_ destruct @H2 | 3: #Hv1 #Hv2 #_ destruct @H3 | 4: #p1 #p2 #Hembed #Hv1 #Hv2 #_ destruct @H4 // ] qed.

(* Avoids the use of cut in toCminorCorrectness *)
lemma value_eq_triangle_diagram : ∀E,v1,v2,v3. value_eq E v1 v2 → v2 = v3 → value_eq E v1 v3.
#H1 #H2 #H3 #H4 #H5 #H6 destruct // qed.

(* embedding compatibility preserves value equivalence *)
(* This is lemma 65 of Leroy&Blazy. *)
lemma embedding_compatibility_value_eq : ∀E,E'. embedding_compatible E E' → ∀v1,v2. value_eq E v1 v2 → value_eq E' v1 v2.
#E #E' #Hcompat #v1 #v2 #Hvalue_eq @(value_eq_inversion … Hvalue_eq) try // #p1 #p2 whd in match (pointer_translation ??); cases p1 #b1 #o1 normalize nodelta cases (Hcompat b1) [ #Hnone >Hnone normalize nodelta #Habsurd destruct | #Heq >Heq #Hres %4 @Hres ] qed.
(**** Lemmas on pointer validity wrt loads. *)

(* If we succeed to load something by value from a 〈b,off〉 location, [b] is a valid block. *)
(* lemma load_by_value_success_valid_block_aux : ∀ty,m,b,off,v. access_mode ty = By_value (typ_of_type ty) → load_value_of_type ty m b off = Some ? v → Zltb (block_id b) (nextblock m) = true. #ty #m * #brg #bid #off #v cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] whd in match (load_value_of_type ????); [ 1,6,7: #_ #Habsurd destruct (Habsurd) ] #Hmode [ 1,2,5: [ 1: elim sz ] normalize in match (typesize ?); whd in match (loadn ???); whd in match (beloadv ??); cases (Zltb bid (nextblock m)) normalize nodelta try // #Habsurd destruct (Habsurd) | *: normalize in Hmode; destruct (Hmode) ] qed. *)

(* If we succeed in loading some data from a location, then this loc is a valid pointer:
 * the block id is below [nextblock] and the offset lies within the block's bounds. *)
lemma load_by_value_success_valid_ptr_aux : ∀ty,m,b,off,v. access_mode ty = By_value (typ_of_type ty) → load_value_of_type ty m b off = Some ? v → Zltb (block_id b) (nextblock m) = true ∧ Zleb (low_bound m b) (Z_of_unsigned_bitvector ? (offv off)) = true ∧ Zltb (Z_of_unsigned_bitvector ? (offv off)) (high_bound m b) = true.
#ty #m * (* #brg *) #bid #off #v cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] whd in match (load_value_of_type ????); [ 1,6,7: #_ #Habsurd destruct (Habsurd) ] #Hmode [ 1,2,5: [ 1: elim sz ] normalize in match (typesize ?); whd in match (loadn ???); whd in match (beloadv ??); cases (Zltb bid (nextblock m)) normalize nodelta cases (Zleb (low (blocks m (mk_block (*brg*) bid))) (Z_of_unsigned_bitvector offset_size (offv off))) cases (Zltb (Z_of_unsigned_bitvector offset_size (offv off)) (high (blocks m (mk_block (*brg*) bid)))) normalize nodelta #Heq destruct (Heq) try /3 by conj, refl/ | *: normalize in Hmode; destruct (Hmode) ] qed.
(* Corollary of [load_by_value_success_valid_ptr_aux]: the loaded-from block is valid. *)
lemma load_by_value_success_valid_block : ∀ty,m,b,off,v. access_mode ty = By_value (typ_of_type ty) → load_value_of_type ty m b off = Some ? v → valid_block m b.
#ty #m #b #off #v #Haccess_mode #Hload @Zltb_true_to_Zlt elim (load_by_value_success_valid_ptr_aux ty m b off v Haccess_mode Hload) * // qed.

(* Corollary of [load_by_value_success_valid_ptr_aux]: the loaded-from location is a valid pointer. *)
lemma load_by_value_success_valid_pointer : ∀ty,m,b,off,v. access_mode ty = By_value (typ_of_type ty) → load_value_of_type ty m b off = Some ? v → valid_pointer m (mk_pointer b off) = true.
#ty #m #b #off #v #Haccess_mode #Hload normalize elim (load_by_value_success_valid_ptr_aux ty m b off v Haccess_mode Hload) * #H1 #H2 #H3 >H1 >H2 normalize nodelta >H3 @refl qed.

(**** Lemmas related to memory injections. *)

(* Making explicit the contents of memory_inj for load_value_of_type.
 * Equivalent to Lemma 59 of Leroy & Blazy *)
lemma load_value_of_type_inj : ∀E,m1,m2,b1,off1,v1,b2,off2,ty. memory_inj E m1 m2 → value_eq E (Vptr (mk_pointer b1 off1)) (Vptr (mk_pointer b2 off2)) → load_value_of_type ty m1 b1 off1 = Some ? v1 → ∃v2. load_value_of_type ty m2 b2 off2 = Some ? v2 ∧ value_eq E v1 v2.
#E #m1 #m2 #b1 #off1 #v1 #b2 #off2 #ty #Hinj #Hvalue_eq lapply (value_eq_ptr_inversion … Hvalue_eq) * #p2 * #Hp2_eq #Hembed destruct lapply (refl ? (access_mode ty)) cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] normalize in ⊢ ((???%) → ?); #Hmode #Hyp [ 1,6,7: normalize in Hyp; destruct (Hyp) | 4,5: normalize in Hyp ⊢ %; destruct (Hyp) /3 by ex_intro, conj, vptr_eq/ ] lapply (load_by_value_success_valid_pointer … Hmode … Hyp) #Hvalid_pointer lapply (mi_inj … Hinj b1 off1 b2 off2 … Hvalid_pointer Hembed Hyp) #H @H qed.

(* Lemmas pertaining to memory allocation *)
(* lemma alloc_valid_block_not_eq : ∀m,m',z1,z2,(*r,*)new_block. alloc m z1 z2 (*r*) = 〈m', new_block〉 → ∀b. valid_block m b → b ≠ new_block. * #c #n #Hn #m' #z1 #z2 #new whd in ⊢ ((??%?) → ?); #Heq destruct #b whd in ⊢ (% → ?); cases b #bid #Hlt % #Heq destruct (Heq) @(absurd … Hlt (irreflexive_Zlt n)) qed. *)

(* A valid block stays valid after an alloc. *)
lemma alloc_valid_block_conservation : ∀m,m',z1,z2,(*r,*)new_block. alloc m z1 z2 (*r*) = 〈m', new_block〉 → ∀b. valid_block m b → valid_block m' b.
#m #m' #z1 #z2 (* #r*) * #b' (* #Hregion_eq *) elim m #contents #nextblock #Hcorrect whd in match (alloc ???(*?*)); #Halloc destruct (Halloc) #b whd in match (valid_block ??) in ⊢ (% → %); /2/ qed.

(* Large inequality plus disequality yields strict inequality on Z. *)
lemma Zle_split : ∀a,b:Z. a ≤ b → a ≠ b → a < b.
#a #b elim a elim b try /2/ #p1 #p2 #Hle #Hneq whd @not_eq_to_le_to_lt try // % #Heq destruct cases Hneq #H @H @refl qed.

(* A valid block /after/ an alloc was valid before if it is different
 * from the newly allocated one. *)
lemma alloc_valid_block_conservation2 : ∀m,m',z1,z2,new_block. alloc m z1 z2 =〈m',new_block〉 → ∀b. b ≠ new_block → valid_block m' b → valid_block m b.
#m #m' #z1 #z2 #new_block cases m #cont #next #Hnext whd in ⊢ ((??%%) → ?); #Heq destruct (Heq) #b #Hneq #Hvalid whd in Hvalid ⊢ %; cases b in Hneq Hvalid; #bid * #H #Hlt cut (bid ≠ next) [ % #Heq destruct @H @refl ] #Hneq @(Zle_split … Hneq) /3 by monotonic_Zle_Zpred, Zlt_to_Zle/ qed.

(* The bounds of any block other than the newly allocated one are unchanged by alloc. *)
lemma alloc_bounds_conservation : ∀m,m',z1,z2,new_block. alloc m z1 z2 =〈m',new_block〉 → ∀b. b ≠ new_block → low_bound m' b = low_bound m b ∧ high_bound m' b = high_bound m b.
#m #m' #z1 #z2 #new #Halloc #b #Hneq cases m in Halloc; #c #n #Hn whd in ⊢ ((??%?) → ?); #Heq destruct @conj [ >unfold_low_bound whd in match (update_block ?????); >(neq_block_eq_block_false … Hneq) @refl | >unfold_high_bound whd in match (update_block ?????); >(neq_block_eq_block_false … Hneq) @refl ] qed.

(* A valid pointer stays valid after an alloc (converse direction: validity in
 * the post-alloc memory, for a pointer outside the new block, entails validity before). *)
lemma alloc_valid_pointer_conservation : ∀m,m',z1,z2,(*r,*)new_block. alloc m z1 z2 (*r*) = 〈m',new_block〉 → ∀p. (pblock p) ≠ new_block → valid_pointer m' p = true → valid_pointer m p = true.
#m #m' #z1 #z2 (*#r*) #new_block cases m #cont #next #Hnext whd in ⊢ ((??%%) → ?); #Heq destruct (Heq) * #b #o #Hneq #Hvalid @valid_pointer_of_Prop lapply (valid_pointer_to_Prop … Hvalid) -Hvalid * * #Hblock_id >unfold_low_bound >unfold_high_bound >unfold_low_bound >unfold_high_bound whd in match (update_block ?????); whd in match (update_block ?????); cut (b ≠ (mk_block (*r*) next)) [ @(eq_block_elim … b (mk_block (*r*) next)) #H destruct try // @False_ind @(absurd … (refl ??) Hneq) ] #Hneqb >(not_eq_block … Hneqb) normalize nodelta #HA #HB % [ % ] try assumption cases b in Hneqb Hneq Hblock_id; (* #r'*) #id * #Hnext_not_id #Hneq #Hlt cut (id ≤ next) [ /3 by monotonic_Zle_Zpred, Zlt_to_Zle/ ] #Hle @Zle_split try @Hle % #Heq destruct @(absurd … (refl ??) Hneq) qed.

(* Allocating a new zone produces a valid block. *)
lemma alloc_valid_new_block : ∀m,m',z1,z2(*,r*),new_block. alloc m z1 z2 (*r*) = 〈m', new_block〉 → valid_block m' new_block.
* #contents #nextblock #Hcorrect #m' #z1 #z2 (* #r *) * #new_block (* #Hregion_eq *) whd in match (alloc ???(*?*)); whd in match (valid_block ??); #Halloc destruct (Halloc) /2/ qed.

(* The freshly allocated block was not valid in the pre-alloc memory. *)
lemma new_block_invalid_before_alloc : ∀m,m',z1,z2,new_block. alloc m z1 z2 = 〈m', new_block〉 → ¬valid_block m new_block.
* #contents #nextblock #Hcorrect #m' #z1 #z2 (* #r *) * #new_block (* #Hregion_eq *) whd in match (alloc ???(*?*)); whd in match (valid_block ??); #Halloc destruct (Halloc) /2/ qed.

(* All data in a valid block is unchanged after an alloc. *)
lemma alloc_beloadv_conservation : ∀m,m',block,offset,z1,z2,(*r,*)new_block. valid_block m block → alloc m z1 z2 (*r*) = 〈m', new_block〉 → beloadv m (mk_pointer block offset) = beloadv m' (mk_pointer block offset).
* #contents #next #Hcorrect #m' #block #offset #z1 #z2 (* #r*) #new_block #Hvalid #Halloc whd in Halloc:(??%?); destruct (Halloc) whd in match (beloadv ??) in ⊢ (??%%); lapply (Zlt_to_Zltb_true … Hvalid) #Hlt >Hlt >(zlt_succ … Hlt) normalize nodelta whd in match (update_block ?????); whd in match (eq_block ??); cut (eqZb (block_id block) next = false) [ lapply (Zltb_true_to_Zlt … Hlt) #Hlt' @eqZb_false /2 by not_to_not/ ] #Hneq >Hneq @refl qed.

(* Lift [alloc_beloadv_conservation] to loadn *)
lemma alloc_loadn_conservation : ∀m,m',z1,z2,(*r,*)new_block. alloc m z1 z2 (*r*) = 〈m', new_block〉 → ∀n. ∀block,offset. valid_block m block → loadn m (mk_pointer block offset) n = loadn m' (mk_pointer block offset) n.
#m #m' #z1 #z2 (*#r*) #new_block #Halloc #n elim n try // #n' #Hind #block #offset #Hvalid_block whd in ⊢ (??%%); >(alloc_beloadv_conservation … Hvalid_block Halloc) cases (beloadv m' (mk_pointer block offset)) // #bev normalize nodelta whd in match (shift_pointer ???); >Hind try // qed.

(* Lift [alloc_loadn_conservation] to typed loads: loads from pre-existing valid
 * blocks are unchanged by an alloc. *)
lemma alloc_load_value_of_type_conservation : ∀m,m',z1,z2(*,r*),new_block. alloc m z1 z2 (*r*) = 〈m', new_block〉 → ∀block,offset. valid_block m block → ∀ty. load_value_of_type ty m block offset = load_value_of_type ty m' block offset.
#m #m' #z1 #z2 (*#r*) #new_block #Halloc #block #offset #Hvalid #ty cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] try // whd in match (load_value_of_type ????) in ⊢ (??%%); >(alloc_loadn_conservation … Halloc … Hvalid) @refl qed.

(* Memory allocation in m2 preserves [memory_inj].
 * This is lemma 66 from Leroy&Blazy. *)
(* NOTE(review): parts of this proof script look garbled, possibly by text
 * extraction (e.g. "[ #H unfold_low_bound" seems to be missing rewrite arrows);
 * kept verbatim — re-check against the original sources. *)
lemma alloc_memory_inj_m2 : ∀E:embedding. ∀m1,m2,m2',z1,z2(*,r*),new_block. ∀H:memory_inj E m1 m2. alloc m2 z1 z2 (*r*) = 〈m2', new_block〉 → block_implementable_bv m2' new_block → memory_inj E m1 m2'.
#E #m1 #m2 #m2' #z1 #z2 (*#r*) * #new_block (* #Hregion *) #Hmemory_inj #Halloc #Himpl % [ whd #b1 #off1 #b2 #off2 #ty #v1 #Hvalid #Hembed #Hload elim (mi_inj E m1 m2 Hmemory_inj b1 off1 b2 off2 … ty v1 Hvalid Hembed Hload) #v2 * #Hload_eq #Hvalue_eq %{v2} @conj try // lapply (refl ? (access_mode ty)) cases ty in Hload_eq; [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] #Hload normalize in ⊢ ((???%) → ?); #Haccess [ 1,6,7: normalize in Hload; destruct (Hload) | 2,3,8: lapply (load_by_value_success_valid_block … Haccess Hload) #Hvalid_block <(alloc_load_value_of_type_conservation … Halloc … Hvalid_block) assumption | 4,5: whd in match (load_value_of_type ????) in Hload ⊢ %; @Hload ] | @(mi_freeblock … Hmemory_inj) | #p #p' #Hvalid #Hptr_trans lapply (mi_valid_pointers … Hmemory_inj p p' Hvalid Hptr_trans) elim m2 in Halloc; #contentmap #nextblock #Hnextblock elim p' * (*#br'*) #bid' #o' #Halloc whd in Halloc:(??%?) ⊢ ?; destruct (Halloc) whd in match (valid_pointer ??) in ⊢ (% → %); @Zltb_elim_Type0 [ 2: normalize #_ #Habsurd destruct (Habsurd) ] #Hbid' cut (Zltb bid' (Zsucc new_block) = true) [ lapply (Zlt_to_Zltb_true … Hbid') @zlt_succ ] #Hbid_succ >Hbid_succ whd in match (low_bound ??) in ⊢ (% → %); whd in match (high_bound ??) in ⊢ (% → %); whd in match (update_block ?????); whd in match (eq_block ??); cut (eqZb bid' new_block = false) [ 1: @eqZb_false /2 by not_to_not/ ] #Hbid_neq >Hbid_neq #H @H (* cases (eq_region br' r) normalize #H @H*) | #b #b' #o' #Hembed lapply (mi_nodangling … Hmemory_inj b b' o' Hembed) #H @(alloc_valid_block_conservation … Halloc … H) | @(mi_region … Hmemory_inj) | @(mi_nonalias … Hmemory_inj) | #b lapply (mi_nowrap … Hmemory_inj b) whd in ⊢ (% → %); lapply (mi_nodangling … Hmemory_inj b) cases (E b) normalize nodelta try // * #B #OFF normalize nodelta #Hguard * * #H1 #H2 #Hinbounds @conj try @conj try assumption @(eq_block_elim … (mk_block new_block) B) [ #H unfold_low_bound >unfold_high_bound #HA #HB @conj >unfold_low_bound >unfold_high_bound whd in match (update_block ?????); >(not_eq_block … (sym_neq ??? H)) normalize nodelta try assumption ] ] qed.
(* --------------------------------------------------------------------------- *)
(* Lemmas relating [alloc] to memory injections (lemmas 67 and 68 of           *)
(* Leroy & Blazy), followed by the first lemmas relating [free] to load        *)
(* simulation (lemmas 46 and 49) and their lifting to full memory injections.  *)
(* NOTE(review): the tactic scripts below are kept token-for-token identical;  *)
(* only comments were added or had typos fixed, since restyling an             *)
(* interactive proof script can easily invalidate it.                          *)
(* --------------------------------------------------------------------------- *)
(* Allocating in m1 such that the resulting block has no image in m2 preserves the injection. This is lemma 67 for Leroy&Blazy. The proof should proceed as in Blazy & Leroy. Notice that we must consider blocks with different regions but same id as equal. *) lemma alloc_memory_inj_m1 : ∀E:embedding. ∀m1,m2,m1',z1,z2,(*r,*)new_block. ∀H:memory_inj E m1 m2. alloc m1 z1 z2 (*r*) = 〈m1', new_block〉 → memory_inj (λx. if eq_block new_block x then None ? else E x) m1' m2 ∧ embedding_compatible E (λx. if eq_block new_block x then None ? else E x). #E #m1 #m2 #m1' #z1 #z2 (* #r*) #new_block (* #Hregion_eq *) #Hinj #Halloc cut (E new_block = None ?) [ @(mi_freeblock … Hinj) cases m1 in Hinj Halloc; #contents #next #Hpos #Hinj whd in ⊢ ((??%%) → ?); #Heq destruct (Heq) % #H whd in H; @(absurd … H (irreflexive_Zlt … next)) ] #Hempty cut (embedding_compatible E (λx. if eq_block new_block x then None (block×offset) else E x)) [ whd #b @(eq_block_elim … new_block b) normalize nodelta try // #Heq eq_block_b_b normalize nodelta #Habsurd destruct ] #Hneq #Hvalid whd in ⊢ ((??%?) → ?); @(eq_block_elim … new_block b1) normalize nodelta [ #Heq #Habsurd destruct (Habsurd) ] #Hneq_id #Hembed #Hload lapply (alloc_valid_pointer_conservation … Halloc (mk_pointer b1 off1) (sym_neq ??? Hneq_id) Hvalid) #Hvalid' cut (valid_block m1 b1) [ cases (valid_pointer_to_Prop … Hvalid') * #Hvalid_block #_ #_ @Hvalid_block ] #Hvalid_block lapply (alloc_load_value_of_type_conservation … Halloc … off1 … Hvalid_block ty) #Heq Heq >eq_block_b_b normalize nodelta cases m1 in Halloc; #contents #next #Hnext >unfold_high_bound >unfold_low_bound >unfold_high_bound >unfold_low_bound >unfold_high_bound >unfold_low_bound >unfold_high_bound >unfold_low_bound whd in ⊢ ((??%%) → ?); destruct (Heq) #Heq destruct (Heq) lapply (neq_block_eq_block_false … (sym_neq ??? Hneq1)) lapply (neq_block_eq_block_false … (sym_neq ??? 
Hneq2)) #Heq_block_2 #Heq_block_1 * [ * [ * | ] | ] whd in match (update_block ?????); >Heq_block_1 normalize nodelta whd in match (update_block ?????); >Heq_block_2 normalize nodelta #H try /4 by or_introl, or_intror, conj/ [ %1 %2 whd whd in H; >unfold_high_bound whd in match (update_block ?????); >unfold_low_bound whd in match (update_block ?????); >Heq_block_1 normalize nodelta @H | %2 whd whd in H; >unfold_high_bound whd in match (update_block ?????); >unfold_low_bound whd in match (update_block ?????); >Heq_block_1 >Heq_block_2 @H ] | #b whd @(eq_block_elim … new_block b) normalize nodelta try // #Hneq lapply (mi_nowrap … Hinj b) whd in ⊢ (% → ?); cases (E b) normalize nodelta try // * #bb #oo normalize nodelta * * * #HA #HB * #HC #HD #HE @conj try @conj try @conj lapply HE -HE lapply HD -HD lapply HC -HC lapply HB -HB lapply HA -HA >unfold_low_bound >unfold_low_bound [ >unfold_low_bound ] >unfold_high_bound >unfold_high_bound [ 2,5: >unfold_high_bound ] cases m1 in Halloc; #cont #next #Hnext whd in ⊢ ((??%%) → ?); #Heq destruct (Heq) try // whd in match (update_block ?????); cut (eq_block b (mk_block next) = false) [ 1,3,5: @neq_block_eq_block_false @(sym_neq … Hneq) | 2,4,6: #Heq_block >Heq_block // ] ] qed. (* loading by-value from a freshly allocated block yields either an undef value or * None. *) lemma load_by_value_after_alloc_undef : ∀m,m',z1,z2,b. alloc m z1 z2 = 〈m', b〉 → ∀ty. access_mode ty = By_value (typ_of_type ty) → ∀off. load_value_of_type ty m' b off = Some ? Vundef ∨ load_value_of_type ty m' b off = None ?. #m #m' #z1 #z2 (* * #br #bid *) #b cases m #contents #nextblock #Hnext whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) #ty cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) #off whd in match (load_value_of_type ????); (* We can't generalize over the typesize, sadly. We thus perform three identical inductions. 
* Another solution would be to provide an ad-hoc lemma, but the goal is too ugly for that. *) [ lapply off -off generalize in match (typesize ?); #tsz | lapply off -off generalize in match (typesize ?); #tsz | lapply off -off generalize in match (typesize ?); #tsz ] elim tsz [ 1,3,5: #off whd in match (loadn ???); normalize nodelta %1 @refl | *: #tsz' #Hind #off whd in match (loadn ???); whd in match (beloadv ??); cases (Zltb ??) normalize nodelta try /1 by or_intror/ cases (Zleb ??) normalize nodelta try /1 by or_intror/ >andb_lsimpl_true cases (Zltb ??); normalize nodelta try /1 by or_intror/ whd in match (update_block ?????); >eq_block_b_b normalize nodelta cases (Hind (shift_offset 2 off (bitvector_of_nat 2 1))) cases (loadn ???) normalize nodelta try // #bevals #Heq %1 whd in match (be_to_fe_value ??); try @refl ] qed. (* In CompCert, "∀v. value_eq Vundef v" holds. Not in our developments. In order * to prove the equivalent of lemma 68 of L&B, we need to provide an additional * property of the target memory: the /target/ portion of memory we are going to map a block * to must only contains BVundef (whereas in CompCert, the fact that the source block * contains BVundefs is enough to proceed). * * The following definition expresses the fact that a portion of memory [z1; z2] contains only * BVundefs. * Note that the bounds are /inclusive/. We do not check for their validity. *) definition undef_memory_zone : mem → block → Z → Z → Prop ≝ λm,b,z1,z2. ∀i. z1 ≤ i → i ≤ z2 → contents (blocks m b) i = BVundef. (* Explicitly stating the properties of alloc. *) lemma alloc_properties : ∀m,m',z1,z2(*,r*),b. alloc m z1 z2 (* r *) = 〈m',b〉 → low_bound m' b = z1 ∧ high_bound m' b = z2 ∧ undef_memory_zone m' b z1 z2. #m #m' #z1 #z2 * (* #br *) #bid cases m #contents #next #Hnext whd in ⊢ ((??%?) 
→ ?); #Heq destruct (Heq) >unfold_low_bound >unfold_high_bound @conj try @conj /2 by refl, pair_destruct_2/ whd #i #H1 #H2 whd in match (update_block ?????); >eq_block_b_b normalize nodelta @refl qed. (* Embed a fresh block inside an unmapped portion of the target block. * This is the equivalent of lemma 68 of Leroy&Blazy. * Compared to L&B, an additional "undef_memory_zone" proof object must be provided. * We also constrain the memory bounds of the new block to be bitvector-implementable, * otherwise we can't prove that addition commutes with conversions between Z and * bitvectors. *) lemma alloc_memory_inj_m1_to_m2 : ∀E:embedding. ∀m1,m2,m1':mem. ∀z1,z2:Z. ∀target,new_block. ∀delta:offset. valid_block m2 target → block_implementable_bv m1' new_block → block_implementable_bv m2 target → block_region new_block = block_region target → (high_bound m1' new_block) + (Zoo delta) < Z_two_power_nat 16 → alloc m1 z1 z2 = 〈m1', new_block〉 → low_bound m2 target ≤ z1 + Zoo delta → z2 + Zoo delta ≤ high_bound m2 target → undef_memory_zone m2 target (z1 + Zoo delta) (z2 + Zoo delta) → (∀b,delta'. b ≠ new_block → E b = Some ? 〈target, delta'〉 → high_bound m1 b + (Zoo delta') ≤ z1 + (Zoo delta) ∨ z2 + (Zoo delta) ≤ low_bound m1 b + (Zoo delta')) → memory_inj E m1 m2 → memory_inj (λx. if eq_block new_block x then Some ? 〈target, delta〉 else E x) m1' m2. #E #m1 #m2 #m1' #z1 #z2 (* * #tr *) #target (* * * #newr *) #new_block (* * *) #delta #Hvalid #Himpl1 #Himpl2 #Hregion #Hno_overflow #Halloc #Hlow #Hhigh #Hundef_zone #Hno_interference #Hinj cut (E new_block = (None ?)) [ @(mi_freeblock ??? Hinj new_block) cases m1 in Hinj Halloc; #contents #next #Hpos #Hinj whd in ⊢ ((??%%) → ?); #Heq destruct (Heq) % #H whd in H; @(absurd … H (irreflexive_Zlt … next)) ] #Hnew_not_mapped cut (embedding_compatible E (λx. if eq_block new_block x then Some ? 
〈target, delta〉 else E x)) [ whd #b @(eq_block_elim … new_block b) normalize nodelta try // #Heq (neq_block_eq_block_false … Hneq) normalize nodelta * #Hembed #Heq destruct (Heq) cases (valid_pointer_to_Prop … Hvalid') * #Hvalid_block' #Hlow #Hhigh lapply (alloc_valid_block_conservation2 … Halloc … (sym_neq ??? Hneq) Hvalid_block') #Hvalid_block lapply (alloc_load_value_of_type_conservation … Halloc b1 off1 … Hvalid_block ty) #Heq_load Hembed @refl ] (* conclude *) * #v2 * #Hload2 #Hvalue_eq %{v2} @conj [ @Hload2 | @(embedding_compatibility_value_eq … Hvalue_eq) @Hembedding_compatible ] | 1: (* we allegedly performed a load from a block with the same id as * the newly allocated block (which by def. of alloc contains only BVundefs) *) #Heq destruct (Heq) whd in Hembed:(??%?); >eq_block_b_b in Hembed; normalize nodelta #Heq_ptr (* cull out all boring cases *) lapply Hload -Hload cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] [ 1,6,7: whd in ⊢ ((??%?) → ?); #Habsurd destruct (Habsurd) | 4,5: whd in ⊢ ((??%?) → ?); #Heq destruct (Heq_ptr Heq) %{(Vptr (mk_pointer b2 (offset_plus off1 delta)))} @conj [ 1,3: @refl | *: %4 whd in ⊢ (??%?); >eq_block_b_b @refl ] ] (* Now: load-by-value cases. Outcome is either None or Undef. *) lapply (load_by_value_after_alloc_undef m1 m1' z1 z2 b1 Halloc) #Hload_eq [ lapply (Hload_eq (Tint sz sg) (refl ??) off1) | lapply (Hload_eq (Tpointer ptr_ty) (refl ??) off1) | lapply (Hload_eq (Tcomp_ptr id) (refl ??) 
off1) ] * (* None case: absurd *) [ 2,4,6: #Hload >Hload #Habsurd destruct (Habsurd) ] #Hload >Hload #Heq destruct (Heq) %{Vundef} @conj try // destruct (Heq_ptr) (* prove the validity of this pointer in order to unfold load *) cut (valid_pointer m2 (mk_pointer b2 (offset_plus off1 delta))=true) [ 1,3,5: @valid_pointer_of_Prop @conj try @conj [ 1,4,7: assumption | 2,5,8: >unfold_low_bound lapply (alloc_properties … Halloc) * * >unfold_low_bound >unfold_high_bound #Hlow' #Hhigh' #Hundef lapply (valid_pointer_to_Prop … Hvalid') * * #Hblock_valid >unfold_low_bound >unfold_high_bound >Hlow' >Hhigh' #Hle_low #Hlt_high destruct cut (Z_of_unsigned_bitvector ? (offv (offset_plus off1 delta)) = Zoo off1 + Zoo delta) [ 1,3,5: >Z_addition_bv_commute try @refl @(transitive_Zlt ???? Hno_overflow) @monotonic_Zlt_Zplus_l @Hlt_high ] #Hsum_split cut (low (blocks m1' b1) + Zoo delta ≤ Z_of_unsigned_bitvector offset_size (offv (offset_plus off1 delta))) [ 1,3,5: >Hsum_split @monotonic_Zle_Zplus_l assumption ] @(transitive_Zle … Hlow) | 3,6,9: >unfold_high_bound lapply (alloc_properties … Halloc) * * >unfold_low_bound >unfold_high_bound #Hlow' #Hhigh' #Hundef lapply (valid_pointer_to_Prop … Hvalid') * * #Hblock_valid >unfold_low_bound >unfold_high_bound >Hlow' >Hhigh' #Hle_low #Hlt_high destruct cut (Z_of_unsigned_bitvector ? (offv (offset_plus off1 delta)) = Zoo off1 + Zoo delta) [ 1,3,5: >Z_addition_bv_commute try @refl @(transitive_Zlt ???? Hno_overflow) @monotonic_Zlt_Zplus_l @Hlt_high ] #Hsum_split cut (Z_of_unsigned_bitvector offset_size (offv (offset_plus off1 delta)) < high (blocks m1' b1) + Zoo delta) [ 1,3,5: >Hsum_split @monotonic_Zlt_Zplus_l assumption ] #Hlt_aux @(Zlt_to_Zle_to_Zlt … Hlt_aux Hhigh) ] ] -Hno_interference #Hvalid_ptr2 (* reformulate the goals in an induction friendly way *) [ cut (∃bvals. loadn m2 (mk_pointer b2 (mk_offset (addition_n ? (offv off1) (offv delta)))) (typesize (typ_of_type (Tint sz sg))) = Some ? 
bvals ∧ (bvals = [ ] ∨ (option_hd … bvals = Some ? BVundef))) [ 2: * #bvals * #Hloadn * #Heq destruct (Heq) whd in match (load_value_of_type ????); >Hloadn normalize nodelta try @refl cases bvals in Heq; try // #hd #tl normalize nodelta whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) try @refl ] | cut (∃bvals. loadn m2 (mk_pointer b2 (mk_offset (addition_n ? (offv off1) (offv delta)))) (typesize (typ_of_type (Tpointer ptr_ty))) = Some ? bvals ∧ (bvals = [ ] ∨ (option_hd … bvals = Some ? BVundef))) [ 2: * #bvals * #Hloadn * #Heq destruct (Heq) whd in match (load_value_of_type ????); >Hloadn normalize nodelta try @refl cases bvals in Heq; try // #hd #tl normalize nodelta whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) try @refl ] | cut (∃bvals. loadn m2 (mk_pointer b2 (mk_offset (addition_n ? (offv off1) (offv delta)))) (typesize (typ_of_type (Tcomp_ptr id))) = Some ? bvals ∧ (bvals = [ ] ∨ (option_hd … bvals = Some ? BVundef))) [ 2: * #bvals * #Hloadn * #Heq destruct (Heq) whd in match (load_value_of_type ????); >Hloadn normalize nodelta try @refl cases bvals in Heq; try // #hd #tl normalize nodelta whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) try @refl ] ] cases (valid_pointer_to_Prop … Hvalid') -Hvalid' * #HA #HB #HC cases (valid_pointer_to_Prop … Hvalid_ptr2) -Hvalid_ptr2 * #HD whd in match (offset_plus ??); #HE #HF cases Himpl1 cases Himpl2 -Himpl1 -Himpl2 >unfold_low_bound >unfold_low_bound >unfold_high_bound >unfold_high_bound * #HG #HH * #HI #HJ * #HK #HL * #HM #HN lapply (alloc_properties … Halloc) * * #HO #HP #Hsrc_undef destruct (HO HP) cases (some_inversion ????? 
Hload) #bevals * #Hloadn #_ lapply Hloadn -Hloadn whd in match (typesize ?); [ generalize in match (S (pred_size_intsize sz)); | generalize in match (S (S O)); | generalize in match (S (S O)); ] #typesize lapply HE lapply HF lapply HB lapply HC -HE -HF -HB -HC lapply bevals lapply off1 elim typesize [ 1,3,5: #off #bevals #HC #HB #HF #HE #_ %{[ ]} @conj try @refl %1 @refl | *: #n #Hind #off #bevals #HC #HB #HF #HE #Hloadn cases (some_inversion ????? Hloadn) -Hloadn #bval1 * #Hbeloadv_eq #Hloadn cases (some_inversion ????? Hloadn) -Hloadn whd in match (shift_pointer ???); whd in match (shift_offset ???); #bevals1 * #Hloadn #Hbevals' whd in match (loadn ???); whd in match (beloadv ??); >(Zlt_to_Zltb_true … HD) normalize nodelta >(Zle_to_Zleb_true … HE) >(Zlt_to_Zltb_true … HF) normalize nodelta lapply (Hundef_zone (Z_of_unsigned_bitvector ? (addition_n offset_size (offv off) (offv delta))) ??) [ 1,4,7: >Z_addition_bv_commute [ 2,4,6: @(transitive_Zlt … Hno_overflow) @(monotonic_Zlt_Zplus_l) assumption | 1,3,5: @(monotonic_Zle_Zplus_l) /3 by Zle_to_Zlt_to_Zlt, Zlt_to_Zle/ ] | 2,5,8: >Z_addition_bv_commute [ 2,4,6: @(transitive_Zlt … Hno_overflow) @(monotonic_Zlt_Zplus_l) assumption | 1,3,5: @(monotonic_Zle_Zplus_l) /3 by Zle_to_Zlt_to_Zlt, Zlt_to_Zle/ ] ] #Hcontents_undef cases n in Hind Hloadn; [ 1,3,5: #_ normalize in match (loadn ???); normalize nodelta #Heq destruct >Hcontents_undef %{[BVundef]} @conj try @refl %2 @refl ] #n' #Hind #Hloadn lapply Hloadn #Hloadn' cases (some_inversion ????? Hloadn') -Hloadn' #bval * whd in match (beloadv ??); >(Zlt_to_Zltb_true … HA) normalize nodelta #Hbeloadv cases (if_opt_inversion ???? Hbeloadv) -Hbeloadv #Hbounds cases (andb_inversion … Hbounds) -Hbounds #Hle1 #Hlt1 #Hcontents1 #_ lapply (Hind (mk_offset (addition_n ? (offv off) (sign_ext 2 offset_size (bitvector_of_nat 2 1)))) bevals1 ????) (* lapply (Hind (mk_offset (addition_n ? (offv off) (sign_ext 2 offset_size (bitvector_of_nat 2 1)))) bevals1 ???? 
Hloadn) *) [ 1,6,11: @(transitive_Zle ??? HE) (commutative_addition_n ? (sign_ext ???) (offv delta)) >associative_addition_n >(Z_addition_bv_commute ?? (sign_ext ???)) [ 2,4,6: normalize in match (Z_of_unsigned_bitvector ? (sign_ext ???)); >(sym_Zplus ? (pos one)) (sym_Zplus ? (pos one)) Z_addition_bv_commute [ 2,4,6: @(transitive_Zlt … Hno_overflow) @(monotonic_Zlt_Zplus_l) @(Zltb_true_to_Zlt … Hlt1) | 1,3,5: @monotonic_Zlt_Zplus_l @(Zltb_true_to_Zlt … Hlt1) ] | 3,8,13: @(Zleb_true_to_Zle … Hle1) | 4,9,14: @(Zltb_true_to_Zlt … Hlt1) ] -Hind #Hind lapply (Hind Hloadn) * #bevals2 * #Hloadn2 #Hbe_to_fe whd in match (shift_pointer ???); whd in match (shift_offset ???); cases Hbe_to_fe [ 1,3,5: #Heq destruct (Heq) %{(contents (blocks m2 b2) (Z_of_unsigned_bitvector offset_size (addition_n offset_size (offv off) (offv delta))) ::[ ])} (commutative_addition_n ? (offv delta)) >associative_addition_n >Hloadn2 normalize nodelta @conj try @refl %2 >Hcontents_undef @refl | *: #Hhd_empty %{(contents (blocks m2 b2) (Z_of_unsigned_bitvector offset_size (addition_n offset_size (offv off) (offv delta))) :: bevals2)} (commutative_addition_n ? (offv delta)) >associative_addition_n >Hloadn2 normalize nodelta @conj try @refl %2 >Hcontents_undef @refl ] ] ] | #b lapply (alloc_valid_new_block … Halloc) @(eq_block_elim … new_block b) [ #Heq destruct #Ha #Hb @False_ind @(absurd … Ha Hb) | #Hneq #Hvalid_new #Hnot_valid normalize nodelta cut (¬valid_block m1 b) [ cases m1 in Halloc; #cont #next #Hnext whd in ⊢ ((??%?) 
→ ?); #Heq destruct (Heq) % #H cases Hnot_valid #H' @H' whd whd in H; /2 by transitive_Zlt/ ] @ (mi_freeblock … Hinj) ] | * #blo #off #p' #Hvalid1 @(eq_block_elim … new_block blo) [ #Heq whd in match (pointer_translation ??); destruct (Heq) >eq_block_b_b normalize nodelta lapply (alloc_properties … Halloc) * * #Hlow_eq #Hhigh_eq #Hundef destruct cases (valid_pointer_to_Prop … Hvalid1) * #Hvalid_block1 #Hlow1 #Hhigh1 #Heq destruct (Heq) @valid_pointer_of_Prop @conj try @conj [ @Hvalid | @(transitive_Zle … Hlow) >Z_addition_bv_commute [ @monotonic_Zle_Zplus_l assumption | @(transitive_Zlt … Hno_overflow) @monotonic_Zlt_Zplus_l assumption ] | @(Zlt_to_Zle_to_Zlt … Hhigh) >Z_addition_bv_commute [ @monotonic_Zlt_Zplus_l assumption | @(transitive_Zlt … Hno_overflow) @monotonic_Zlt_Zplus_l assumption ] ] | #Hblocks_neq whd in match (pointer_translation ??); >(neq_block_eq_block_false … Hblocks_neq) normalize nodelta #Htranslate lapply (mi_valid_pointers ??? Hinj (mk_pointer blo off) p' ?) [ @(alloc_valid_pointer_conservation … Halloc … Hvalid1) @sym_neq assumption ] #H @H assumption ] | #b #b' #o' @(eq_block_elim … new_block b) [ #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) @Hvalid | #Hneq normalize nodelta #Hembed @(mi_nodangling ??? Hinj … Hembed) ] | #b #b' #o' @(eq_block_elim … new_block b) [ #Heq normalize nodelta #Heq' destruct (Heq Heq') @Hregion | #Hneq normalize nodelta #Hembed @(mi_region … Hinj … Hembed) ] | whd #b1 #b2 #b1' #b2' #delta1 #delta2 #Hneq cases (alloc_properties … Halloc) * #Hlow1 #Hhigh1 #Hundef @(eq_block_elim … new_block b1) normalize nodelta [ #Heq #Heq' destruct >(neq_block_eq_block_false … Hneq) normalize nodelta #Hembed (* lapply (alloc_bounds_conservation … Halloc) … *) @(eq_block_elim … b1' b2') normalize nodelta [ #Heq destruct (Heq) lapply (Hno_interference b2 ? (sym_neq ??? Hneq) Hembed) * cases (alloc_bounds_conservation … Halloc … (sym_neq ??? 
Hneq)) #HA #HB [ #H %1 %1 %2 >HB @H | #H %1 %1 %1 >HA @H ] | #Hneq @I ] | #Hneq' #Hembed @(eq_block_elim … new_block b2) normalize nodelta [ #Heq #Heq' destruct @(eq_block_elim … b1' b2') normalize nodelta [ #Heq destruct (Heq) lapply (Hno_interference b1 ? Hneq Hembed) * cases (alloc_bounds_conservation … Halloc … (sym_neq ??? Hneq')) #HA #HB [ #H %1 %1 %1 >HB @H | #H %1 %1 %2 >HA @H ] | #Hneq @I ] | #Hneq'' #Hembed' @(eq_block_elim … b1' b2') normalize nodelta [ #Heq'' destruct (Heq'') lapply (mi_nonalias … Hinj … Hneq … Hembed Hembed') >eq_block_b_b normalize nodelta cases (alloc_bounds_conservation … Halloc … (sym_neq ??? Hneq'')) #HA #HB cases (alloc_bounds_conservation … Halloc … (sym_neq ??? Hneq')) #HC #HD * [ * [ * | ] | ] [ #Hle %1 %1 %1 >HD >HA @Hle | #Hle %1 %1 %2 >HB >HC @Hle | #Hempty %1 %2 whd in Hempty ⊢ %; >HD >HC @Hempty | #Hempty %2 whd in Hempty ⊢ %; >HB >HA @Hempty ] | #_ @I ] ] ] | #b whd @(eq_block_elim … new_block b) normalize nodelta #Hneq destruct (Hneq) [ @conj try @conj try assumption | lapply (mi_nowrap … Hinj b) whd in ⊢ (% → ?); cases (alloc_bounds_conservation … Halloc … (sym_neq ??? Hneq)) #HA #HB cases (E b) normalize nodelta try // * #blo #off normalize nodelta * * #HA' #HB' #HC' @conj try @conj whd in HA'; whd in HB'; whd in match (block_implementable_bv ??); [ 1,2: >HA >HB assumption | *: >HB assumption ] ] ] qed. (* And show the compatibility of the new injection. *) lemma alloc_memory_inj_m1_to_m2_compat : ∀E:embedding. ∀m1,m2,m1':mem. ∀z1,z2:Z. ∀target,new_block. ∀delta:offset. alloc m1 z1 z2 = 〈m1', new_block〉 → memory_inj E m1 m2 → embedding_compatible E (λx. if eq_block new_block x then Some ? 〈target, delta〉 else E x). #E #m1 #m2 #m1' #z1 #z2 #target #new_block #delta #ALLOC #MI whd #b @eq_block_elim [ #E destruct %1 @(mi_freeblock … MI) /2/ | #_ %2 % ] qed. (* -------------------------------------- *) (* Lemmas pertaining to [free] *) (* Lemma 46 by Leroy&Blazy *) lemma free_load_sim_ptr_m1 : ∀E:embedding. ∀m1,m2,m1'. 
load_sim_ptr E m1 m2 → ∀b. free m1 b = m1' → load_sim_ptr E m1' m2. #E #m1 #m2 #m1' #Hinj #b #Hfree whd #b1 #off1 #b2 #off2 #ty #v1 #Hvalid #Htrans cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] [ 1,4,5,6,7: whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) normalize %{(Vptr (mk_pointer b2 off2))} @conj try @refl %4 @Htrans ] #Hload Hbeloadv' normalize nodelta cases (some_inversion ????? Hloadn) -Hloadn #bevals * #Hloadn #Heq destruct (Heq) >(Hind … Hneq Hloadn) normalize nodelta @refl ] qed. lemma free_load_sim_ptr_m2 : ∀E:embedding. ∀m1,m2,m2'. load_sim_ptr E m1 m2 → ∀b. free m2 b = m2' → (∀b1,delta. E b1 = Some ? 〈b, delta〉 → ¬ (valid_block m1 b1) ∨ block_is_empty m1 b1) → load_sim_ptr E m1 m2'. #E #m1 #m2 #m2' #Hsim #b #Hfree #Hunmapped whd #b1 #off1 #b2 #off2 #ty #v1 #Hvalid #Htrans cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] [ 1,4,5,6,7: whd in ⊢ ((??%?) → ?); #Heq destruct (Heq) normalize %{(Vptr (mk_pointer b2 off2))} @conj try @refl %4 @Htrans ] lapply Htrans -Htrans lapply Hsim -Hsim lapply Hfree -Hfree cases m2 #contents2 #next2 #Hnext2 whd in ⊢ ((??%?) → ?); #Hm2' destruct #Hsim #Htrans cases (some_inversion ????? Htrans) * #b2' #off2' * #Hembed normalize nodelta #Heq destruct (Heq) #Hload lapply (Hsim … Hvalid … Htrans … Hload) * #v2' * #Hload_before_free #Hvalue_eq @(eq_block_elim … b2 b) [ 1,3,5: #Heq destruct lapply (Hunmapped b1 off2' Hembed) * [ 1,3,5: #Hnot_valid lapply (valid_pointer_to_Prop … Hvalid) -Hvalid whd in match (valid_block ??) 
in Hnot_valid; * * #Hvalid #_ #_ @False_ind cases Hnot_valid #H @H @Hvalid | *: whd in ⊢ (% → ?); #Hempty lapply (valid_pointer_to_Prop … Hvalid) -Hvalid * * #_ #Hlow #Hhigh cut ((low_bound m1 b1 < high_bound m1 b1)) [ 1,3,5: /2 by Zle_to_Zlt_to_Zlt, Zlt_to_Zle_to_Zlt/ ] #Hnonempty @False_ind -Hlow -Hhigh lapply (Zlt_to_Zle_to_Zlt … Hnonempty Hempty) #H cases (irreflexive_Zlt (low_bound m1 b1)) #H' @H' @H ] | *: #Hneq %{v2'} @conj try assumption whd in match (load_value_of_type ????) in Hload_before_free:(??%?) ⊢ (??%?); cases (some_inversion ????? Hload_before_free) -Hload_before_free #bevals * #Hloadn #Heq >(free_loadn_sim_ptr_m2 … Hloadn) normalize nodelta try assumption @sym_neq @Hneq ] qed. lemma free_block_is_empty : ∀m,m',b,b'. block_region b = block_region b' → block_is_empty m b → free m b' = m' → block_is_empty m' b. * #contents #nextblock #Hpos #m' #b #b' #Hregions_eq #HA #HB normalize in HB; Hregions_eq cases (block_region b') normalize nodelta @(eqZb_elim … (block_id b) (block_id b')) [ 1,3: #Heq normalize nodelta // | 2,4: #Hneq normalize nodelta @HA ] qed. lemma high_bound_freed_block : ∀m,b. high_bound (free m b) b = OZ. #m #b normalize cases (block_region b) normalize >eqZb_z_z normalize @refl qed. lemma low_bound_freed_block : ∀m,b. low_bound (free m b) b = (pos one). #m #b normalize cases (block_region b) normalize >eqZb_z_z normalize @refl qed. (* Lemma 49 in Leroy&Blazy *) lemma free_non_aliasing_m1 : ∀E:embedding. ∀m1,m2,m1'. memory_inj E m1 m2 → ∀b. free m1 b = m1' → non_aliasing E m1'. (* The proof proceeds by a first case analysis to see whether b lives in the same world as the non-aliasing blocks (if not : trivial), in this case we proceed to a second case analysis on the non-aliasing hypothesis in memory_inj. 
*) #E #m1 #m2 #m1' #Hinj #b #Hfree whd #b1 #b2 #b1' #b2' #delta1 #delta2 #Hneq #Hembed1 #Hembed2 @(eq_block_elim … b1' b2') normalize nodelta [ 2: try // ] #Hblocks_eq lapply Hembed2 -Hembed2 lapply Hembed1 -Hembed1 lapply Hblocks_eq -Hblocks_eq (*cases b1' #r1' #bid1' cases b2' #r2' #bid2' *) #Heq destruct (Heq) generalize in match b2'; #target #Hembed1 #Hembed2 lapply (mi_nonalias … Hinj … Hneq Hembed1 Hembed2) >eq_block_b_b normalize nodelta normalize nodelta high_bound_freed_block >low_bound_freed_block // | #Hneq1 @(eq_block_elim … b2 b) [ #Heq destruct (Heq) #Hcase %2 whd >high_bound_freed_block >low_bound_freed_block // ] ] #Hneq2 >unfold_high_bound >unfold_high_bound >unfold_high_bound >unfold_high_bound >unfold_low_bound >unfold_low_bound >unfold_low_bound >unfold_low_bound <(high_free_eq … Hneq1) <(high_free_eq … Hneq2) <(low_free_eq … Hneq1) <(low_free_eq … Hneq2) * [ 2: #H %2 whd >unfold_high_bound >unfold_low_bound <(low_free_eq … Hneq2) <(high_free_eq … Hneq2) @H ] * [ 2: #H %1%2 whd >unfold_high_bound >unfold_low_bound <(low_free_eq … Hneq1) <(high_free_eq … Hneq1) @H ] * #H [ %1 %1 %1 @H | %1 %1 %2 @H ] qed. (* Extending lemma 46 and 49 to memory injections *) lemma free_memory_inj_m1 : ∀E:embedding. ∀m1,m2,m1'. memory_inj E m1 m2 → ∀b. free m1 b = m1' → memory_inj E m1' m2. #E #m1 #m2 #m1' #Hinj #b #Hfree cases Hinj #Hload_sim #Hfreeblock #Hvalid #Hnodangling #Hregion #Hnonalias #Himplementable % try assumption [ @(free_load_sim_ptr_m1 … Hload_sim … Hfree) | #bb #Hnot_valid lapply (Hfreeblock bb) #HA @HA % #HB cases Hnot_valid #HC @HC unfold_low_bound >unfold_high_bound whd in match (update_block ?????); >(not_eq_block … (sym_neq ??? Hneq)) try @conj normalize nodelta cases HA try // | 1,3: #H destruct whd in match (free ??); whd [ >unfold_low_bound ] >unfold_high_bound whd in match (update_block ?????); >eq_block_b_b normalize nodelta try @conj try // % try // ] ] qed. 
(* Lifting lemma 47 to memory injs - note that we require a much stronger condition * on the freed block : no block of m1 can map to it. Not even an invalid block or * an empty one. * XXX this lemma is not given in Leroy&Blazy, and unless future developments requires * it I will comment it out. XXX *) lemma free_memory_inj_m2 : ∀E:embedding. ∀m1,m2,m2'. memory_inj E m1 m2 → ∀b. free m2 b = m2' → (∀b1,delta. E b1 = Some ? 〈b, delta〉 → (¬ (valid_block m1 b1)) ∨ block_is_empty m1 b1) → memory_inj E m1 m2'. #E #m1 #m2 #m2' #Hinj #b #Hfree #b_not_mapped % cases Hinj try // #Hload_sim #Hfreeblock #Hvalid #Hnodangling #Hregion #Hnonalias #Himpl [ @(free_load_sim_ptr_m2 … Hload_sim … Hfree) #b1 #delta #Hembed @(b_not_mapped … b1 delta Hembed) | @Hfreeblock | * #bp #op #p' #Hp_valid #Hptr_trans @(eq_block_elim … (pblock p') b) [ #Heq lapply (Hvalid … Hp_valid Hptr_trans) #Hp_valid' destruct (Heq) cases (some_inversion ????? Hptr_trans) * #bp' #op' * #Hembed normalize nodelta #Heq destruct (Heq) cases (b_not_mapped … Hembed) [ #Hnot_valid lapply (Hfreeblock … Hnot_valid) >Hembed #Habsurd destruct (Habsurd) | #Hempty @False_ind lapply (valid_pointer_to_Prop … Hp_valid) * * #_ whd in Hempty; #Hle #Hlt lapply (Zlt_to_Zle_to_Zlt … Hlt Hempty) #Hlt2 lapply (Zlt_to_Zle_to_Zlt … Hlt2 Hle) #Hlt3 @(absurd … Hlt3 (irreflexive_Zlt ?)) ] | #Hneq lapply (Hvalid … Hp_valid Hptr_trans) unfold_low_bound <(low_free_eq m2 ?? Hneq) @HB | >unfold_high_bound <(high_free_eq m2 ?? Hneq) @HC ] ] | #bb #b' #o' #Hembed lapply (Hnodangling … Hembed) unfold_low_bound | *: >unfold_high_bound ] whd in match (update_block ?????); @(eq_block_elim … BLO b) #H [ 1,3,5: destruct (H) normalize nodelta try // @conj try // | *: normalize nodelta cases HBLO try // ] ] qed. (* Lemma 64 of L&B on [freelist] *) lemma freelist_memory_inj_m1_m2 : ∀E:embedding. ∀m1,m2,m1',m2'. memory_inj E m1 m2 → ∀blocklist,b2. (∀b1,delta. E b1 = Some ? 〈b2, delta〉 → meml ? 
b1 blocklist) → free m2 b2 = m2' → free_list m1 blocklist = m1' → memory_inj E m1' m2'. #E #m1 #m2 #m1' #m2' #Hinj #blocklist #b2 #Hnot_mapped #Hfree2 #Hfree_list cut (memory_inj E m1' m2) [ unfold_high_bound >unfold_low_bound whd in match (update_block ?????); >eq_block_b_b normalize nodelta @I | >free_list_cons in Hfree_list; #Hfree_list #H unfold_high_bound >unfold_low_bound whd in match (update_block ?????); @(eq_block_elim … b1 hd) [ #Heq normalize nodelta @I | #Hneq normalize nodelta @(Hind … (mk_mem contents2 n2 Hn2) … (refl ??) … H) ] ] ] qed. (* ---------------------------------------------------------- *) (* Lemma 40 of the paper of Leroy & Blazy on the memory model * and store-related lemmas *) lemma bestorev_beloadv_conservation : ∀mA,mB,wb,wo,v. bestorev mA (mk_pointer wb wo) v = Some ? mB → ∀rb,ro. eq_block wb rb = false → beloadv mA (mk_pointer rb ro) = beloadv mB (mk_pointer rb ro). #mA #mB #wb #wo #v #Hstore #rb #ro #Hblock_neq whd in ⊢ (??%%); elim mB in Hstore; #contentsB #nextblockB #HnextblockB normalize in ⊢ (% → ?); cases (Zltb (block_id wb) (nextblock mA)) normalize nodelta [ 2: #Habsurd destruct (Habsurd) ] cases (if Zleb (low (blocks mA wb)) (Z_of_unsigned_bitvector 16 (offv wo)) then Zltb (Z_of_unsigned_bitvector 16 (offv wo)) (high (blocks mA wb))  else false) normalize nodelta [ 2: #Habsurd destruct (Habsurd) ] #Heq destruct (Heq) cases (Zltb (block_id rb) (nextblock mA)) normalize nodelta try // cut (eqZb (block_id rb) (block_id wb) = false) [ >eqZb_symmetric @Hblock_neq ] #Heqzb >Heqzb normalize nodelta @refl qed. (* lift [bestorev_beloadv_conservation to [loadn] *) lemma bestorev_loadn_conservation : ∀mA,mB,n,wb,wo,v. bestorev mA (mk_pointer wb wo) v = Some ? mB → ∀rb,ro. eq_block wb rb = false → loadn mA (mk_pointer rb ro) n = loadn mB (mk_pointer rb ro) n. 
(* Proof: induction on the number [n] of loaded bytes, using the single-byte
 * conservation lemma at each step. *)
#mA #mB #n elim n [ 1: #wb #wo #v #Hstore #rb #ro #Hblock_neq normalize @refl | 2: #n' #Hind #wb #wo #v #Hstore #rb #ro #Hneq whd in ⊢ (??%%); >(bestorev_beloadv_conservation … Hstore … Hneq) >(Hind … Hstore … Hneq) @refl ] qed.
(* lift [bestorev_loadn_conservation] to [load_value_of_type] *)
lemma bestorev_load_value_of_type_conservation : ∀mA,mB,wb,wo,v. bestorev mA (mk_pointer wb wo) v = Some ? mB → ∀rb,ro. eq_block wb rb = false → ∀ty.load_value_of_type ty mA rb ro = load_value_of_type ty mB rb ro.
(* Proof: case analysis on [ty]; the loadable cases all reduce to [loadn]. *)
#mA #mB #wb #wo #v #Hstore #rb #ro #Hneq #ty cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] // [ 1: elim sz ] whd in ⊢ (??%%); >(bestorev_loadn_conservation … Hstore … Hneq) @refl qed.
(* lift [bestorev_load_value_of_type_conservation] to storen *)
lemma storen_load_value_of_type_conservation : ∀l,mA,mB,wb,wo. storen mA (mk_pointer wb wo) l = Some ? mB → ∀rb,ro. eq_block wb rb = false → ∀ty.load_value_of_type ty mA rb ro = load_value_of_type ty mB rb ro.
(* Proof: induction on the stored byte list [l], chaining the one-byte lemma
 * through the intermediate memories produced by [storen]. *)
#l elim l [ 1: #mA #mB #wb #wo normalize #Heq destruct // | 2: #hd #tl #Hind #mA #mB #wb #wo #Hstoren cases (some_inversion ????? Hstoren) #mA' * #Hbestorev #Hstoren' whd in match (shift_pointer ???) in Hstoren':(??%?); #rb #ro #Hneq_block #ty lapply (Hind ???? Hstoren' … ro … Hneq_block ty) #Heq1 lapply (bestorev_load_value_of_type_conservation … Hbestorev … ro … Hneq_block ty) #Heq2 // ] qed.
(* Lift [storen_load_value_of_type_conservation] to [store_value_of_type].
 * NOTE: the inner [∀ty] (the loaded type) shadows the outer [∀ty] (the stored type). *)
lemma store_value_of_type_load_value_of_type_conservation : ∀ty,mA,mB,wb,wo,v. store_value_of_type ty mA wb wo v = Some ? mB → ∀rb,ro. eq_block wb rb = false → ∀ty.load_value_of_type ty mA rb ro = load_value_of_type ty mB rb ro.
#ty #mA #mB #wb #wo #v #Hstore #rb #ro #Heq_block cases ty in Hstore; [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] [ 1,4,5,6,7: whd in ⊢ ((??%?) → ?); #Heq destruct ] whd in ⊢ ((??%?)
→ ?); #Hstoren @(storen_load_value_of_type_conservation … Hstoren … Heq_block) qed. definition typesize' ≝ λty. typesize (typ_of_type ty). lemma storen_load_value_of_type_conservation_in_block_high : ∀E,mA,mB,mC,wo,l. memory_inj E mA mB → ∀blo. storen mB (mk_pointer blo wo) l = Some ? mC → ∀b1,delta. E b1 = Some ? 〈blo,delta〉 → high_bound mA b1 + Zoo delta < Zoo wo → ∀ty,off. Zoo off + Z_of_nat (typesize' ty) < high_bound mA b1 → low_bound … mA b1 ≤ Zoo off → Zoo off < high_bound … mA b1 → load_value_of_type ty mB blo (mk_offset (addition_n ? (offv off) (offv delta))) = load_value_of_type ty mC blo (mk_offset (addition_n ? (offv off) (offv delta))). #E #mA #mB #mC #wo #data #Hinj #blo #Hstoren #b1 #delta #Hembed #Hhigh #ty (* need some stuff asserting that if a ptr is valid at the start of a write it's valid at the end. *) cases data in Hstoren; [ 1: normalize in ⊢ (% → ?); #Heq destruct // | 2: #xd #data ] #Hstoren cases ty [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ]#off #Hofflt #Hlow_load #Hhigh_load try @refl whd in match (load_value_of_type ????) in ⊢ (??%%); [ 1: lapply (storen_to_valid_pointer … Hstoren) * * #Hbounds #Hbefore #Hafter lapply Hofflt -Hofflt lapply Hlow_load -Hlow_load lapply Hhigh_load -Hhigh_load lapply off -off whd in match typesize'; normalize nodelta generalize in match (typesize ?); #n elim n try // #n' #Hind #o #Hhigh_load #Hlow_load #Hlt whd in match (loadn ???) in ⊢ (??%%); whd in match (beloadv ??) in ⊢ (??%%); cases (valid_pointer_to_Prop … Hbefore) * #HltB_store #HlowB_store #HhighB_store cases (valid_pointer_to_Prop … Hafter) * #HltC_store #HlowC_store #HhighC_store >(Zlt_to_Zltb_true … HltB_store) >(Zlt_to_Zltb_true … HltC_store) normalize nodelta cut (Zleb (low (blocks mB blo)) (Z_of_unsigned_bitvector ? (addition_n ? 
(offv o) (offv delta))) = true) [ 1: (* Notice that: low mA b1 ≤ o < high mA b1 hence, since E b1 = 〈blo, delta〉 with delta >= 0 low mB blo ≤ (low mA b1 + delta) ≤ o+delta < (high mA b1 + delta) ≤ high mB blo *) @cthulhu ] #HA >HA >andb_lsimpl_true -HA cut (Zltb (Z_of_unsigned_bitvector ? (addition_n ? (offv o) (offv delta))) (high (blocks mB blo)) = true) [ 1: (* Same argument as above *) @cthulhu ] #HA >HA normalize nodelta -HA cut (Zleb (low (blocks mC blo)) (Z_of_unsigned_bitvector ? (addition_n ? (offv o) (offv delta))) = true) [ 1: (* Notice that storen does not modify the bounds of a block. Related lemma present in [MemProperties]. This cut follows from this lemma (which needs some info on the size of the data written, which we have but must make explicit) and from the first cut. *) @cthulhu ] #HA >HA >andb_lsimpl_true -HA cut (Zltb (Z_of_unsigned_bitvector ? (addition_n ? (offv o) (offv delta))) (high (blocks mC blo)) = true) [ 1: (* Same argument as above *) @cthulhu ] #HA >HA normalize nodelta -HA normalize in match (bitvector_of_nat ??); whd in match (shift_pointer ???); whd in match (shift_offset ???); >commutative_addition_n >associative_addition_n lapply (Hind (mk_offset (addition_n offset_size (sign_ext 2 ? [[false; true]]) (offv o))) ???) [ 1: (* Provable from Hlt *) @cthulhu | 2: (* Provable from Hlow_load, need to make a "succ" commute from bitvector to Z *) @cthulhu | 3: (* Provable from Hlt, again *) @cthulhu ] cases (loadn mB (mk_pointer blo (mk_offset (addition_n ? (addition_n ? (sign_ext 2 offset_size [[false; true]]) (offv o)) (offv delta)))) n') normalize nodelta cases (loadn mC (mk_pointer blo (mk_offset (addition_n ? (addition_n ? (sign_ext 2 offset_size [[false; true]]) (offv o)) (offv delta)))) n') normalize nodelta try // [ 1,2: #l #Habsurd destruct ] #l1 #l2 #Heq cut (contents (blocks mB blo) (Z_of_unsigned_bitvector ? (addition_n ? (offv o) (offv delta))) = contents (blocks mC blo) (Z_of_unsigned_bitvector ? (addition_n ? 
(offv o) (offv delta)))) [ 1: (* Follows from Hhigh, indirectly *) @cthulhu ] #Hcontents_eq >Hcontents_eq whd in match (be_to_fe_value ??) in ⊢ (??%%); cases (contents (blocks mC blo) (Z_of_unsigned_bitvector ? (addition_n ? (offv o) (offv delta)))) normalize nodelta try // (* Ok this is going to be more painful than what I thought. *) @cthulhu | *: @cthulhu ] qed. lemma storen_load_value_of_type_conservation_in_block_low : ∀E,mA,mB,mC,wo,l. memory_inj E mA mB → ∀blo. storen mB (mk_pointer blo wo) l = Some ? mC → ∀b1,delta. E b1 = Some ? 〈blo,delta〉 → Zoo wo < low_bound mA b1 + Zoo delta → ∀ty,off. Zoo off + Z_of_nat (typesize' ty) < high_bound mA b1 → low_bound … mA b1 ≤ Zoo off → Zoo off < high_bound … mA b1 → load_value_of_type ty mB blo (mk_offset (addition_n ? (offv off) (offv delta))) = load_value_of_type ty mC blo (mk_offset (addition_n ? (offv off) (offv delta))). @cthulhu qed. (* Writing in the "extended" part of a memory preserves the underlying injection. *) lemma bestorev_memory_inj_to_load_sim : ∀E,mA,mB,mC. ∀Hext:memory_inj E mA mB. ∀block2. ∀i : offset. ∀ty : type. (∀block1,delta. E block1 = Some ? 〈block2, delta〉 → (high_bound mA block1 + (Zoo delta) < (Zoo i)) ∨ (Zoo i + (sizeof ty) ≤ (low_bound mA block1 + (Zoo delta)))) → ∀v.store_value_of_type ty mB block2 i v = Some ? mC → load_sim_ptr E mA mC. #E #mA #mB #mC #Hinj #block2 #i #storety cases storety [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] #Hout #storeval #Hstore whd #b1 #off1 #b2 #off2 #ty #readval #Hvalid #Hptr_trans #Hload_value whd in Hstore:(??%?); [ 1,5,6,7,8: destruct ] [ 1: lapply (mi_inj … Hinj … Hvalid … Hptr_trans … Hload_value) lapply Hload_value -Hload_value cases ty [ | #sz' #sg' | #ptr_ty' | #array_ty' #array_sz' | #domain' #codomain' | #structname' #fieldspec' | #unionname' #fieldspec' | #id' ] #Hload_value (* Prove that the contents of mB where v1 was were untouched. 
*) * #readval' * #Hload_value2 #Hvalue_eq %{readval'} @conj try assumption cases (some_inversion ????? Hptr_trans) * #b2' #delta' * #Hembed -Hptr_trans normalize nodelta #Heq destruct (Heq) @(eq_block_elim … b2 block2) [ 2,4,6,8: #Hblocks_neq 0 *) @cthulhu | 2,4,6,8: (* deductible from Hload_value *) @cthulhu ] ] ] | *: (* exactly the same proof as above *) @cthulhu ] qed. (* Lift the above result to memory_inj * This is Lemma 40 of Leroy & Blazy *) lemma store_value_of_type_memory_inj_to_memory_inj : ∀E,mA,mB,mC. ∀Hext:memory_inj E mA mB. ∀block2. ∀i : offset. ∀ty : type. (∀block1,delta. E block1 = Some ? 〈block2, delta〉 → (high_bound mA block1 + (Zoo delta) < (Zoo i)) ∨ (Zoo i + (sizeof ty) ≤ (low_bound mA block1 + (Zoo delta)))) → ∀v.store_value_of_type ty mB block2 i v = Some ? mC → memory_inj E mA mC. #E #mA #mB #mC #Hinj #block2 #i #ty #Hout #v #Hstore % lapply (bestorev_memory_inj_to_load_sim … Hinj … Hout … Hstore) #Hsim try // cases Hinj #Hsim' #Hfreeblock #Hvalid #Hnodangling #Hregion #Hnonalias #Himpl try assumption [ #p #p' #Hptr #Hptr_trans lapply (Hvalid p p' Hptr Hptr_trans) #Hvalid cases ty in Hstore; [ | #sz' #sg' | #ptr_ty' | #array_ty' #array_sz' | #domain' #codomain' | #structname' #fieldspec' | #unionname' #fieldspec' | #id' ] whd in ⊢ ((??%?) → ?); [ 1,4,5,6,7: #Habsurd destruct ] cases (fe_to_be_values ??) [ 1,3,5,7: whd in ⊢ ((??%?) → ?); #Heq Hnext try // cases (Hbounds (pblock p')) #HA #HB whd in match (low_bound ??); whd in match (high_bound ??); >HA >HB try assumption ] | lapply (mem_bounds_after_store_value_of_type … Hstore) * #Hnext_eq #Hb #b #b' #o' cases (Hb b') #Hlow_eq #Hhigh_eq #Hembed lapply (mi_nodangling … Hinj … Hembed) whd in match (valid_block ??) 
in ⊢ (% → %); >(Hnext_eq) try // | #b lapply (mi_nowrap … Hinj b) whd in ⊢ (% → %); cases (E b) try // * #BLO #OFF normalize nodelta * * #Hb #HBLO #Hbound try @conj try @conj try assumption lapply (mem_bounds_after_store_value_of_type … Hstore) * #_ #Hbounds cases (Hbounds BLO) #Hlow #Hhigh whd % [ >unfold_low_bound | >unfold_high_bound ] >Hlow >Hhigh cases HBLO try // ] qed. (* ---------------------------------------------------------- *) (* Lemma 41 of the paper of Leroy & Blazy on the memory model * and related lemmas *) (* The back-end might contain something similar to this lemma. *) lemma be_to_fe_value_ptr : ∀b,o. (be_to_fe_value ASTptr (fe_to_be_values ASTptr (Vptr (mk_pointer b o))) = Vptr (mk_pointer b o)). #b * #o whd in ⊢ (??%%); normalize cases b (* #br *) #bid normalize nodelta (*cases br normalize nodelta *) >eqZb_z_z normalize nodelta cases (vsplit_eq bool 7 8 … o) #lhs * #rhs #Heq <(vsplit_prod … Heq) >eq_v_true normalize nodelta try @refl * // qed. lemma fe_to_be_values_length : ∀E,v1,v2,ty. value_eq E v1 v2 → |fe_to_be_values ty v1| = |fe_to_be_values ty v2|. #E #v1 #v2 #ty #Hvalue_eq @(value_eq_inversion … Hvalue_eq) // qed. lemma value_eq_to_be_and_back_again : ∀E,ty,v1,v2. value_eq E v1 v2 → ast_typ_consistent_with_value ty v1 → value_eq E (be_to_fe_value ty (fe_to_be_values ty v1 )) (be_to_fe_value ty (fe_to_be_values ty v2)). #E #ty #v1 #v2 #Hvalue_eq @(value_eq_inversion … Hvalue_eq) try // [ 1: cases ty // | 2: #sz #i cases ty [ 2: @False_ind | 1: #sz' #sg #H lapply H whd in ⊢ (% → ?); #Heq lapply (fe_to_be_to_fe_value_int … H) #H >H // ] | 3: #p1 #p2 #Hembed cases ty [ 1: #sz #sg @False_ind | 2: #_ cases p1 in Hembed; #b1 #o1 cases p2 #b2 #o2 whd in ⊢ ((??%%) → ?); #H cases (some_inversion ????? H) * #b3 #o3 * #Hembed normalize nodelta #Heq >be_to_fe_value_ptr >be_to_fe_value_ptr destruct %4 whd in match (pointer_translation ??); >Hembed normalize nodelta @refl ] ] qed. 
(* ------------------------------------------------------------ *) (* Lemma ?? of the paper of Leroy & Blazy on the memory model ? *) lemma storen_parallel_memory_exists : ∀E,m1,m2. memory_inj E m1 m2 → ∀b1,b2,delta. E b1 = Some ? 〈b2,delta〉 → ∀v1,v2. value_eq E v1 v2 → ∀i,m1',ty. storen m1 (mk_pointer b1 i) (fe_to_be_values ty v1) = Some ? m1'→ ∃m2'. storen m2 (mk_pointer b2 (offset_plus i delta)) (fe_to_be_values ty v2) = Some ? m2'. (* ∧ loadn m2' (mk_pointer b2 (offset_plus i delta)) (|fe_to_be_values ty v2|) = Some ? (fe_to_be_values ty v2).*) #E #m1 #m2 #Hinj #b1 #b2 #delta #Hembed #v1 #v2 #Hvalue_eq #i #m1' #ty lapply (mi_valid_pointers … Hinj) generalize in match m2; generalize in match m1; generalize in match i; lapply (fe_to_be_values_length … ty Hvalue_eq) generalize in match (fe_to_be_values ty v2); generalize in match (fe_to_be_values ty v1); #l1 elim l1 [ 1: #l2 #Hl2 cut (l2 = []) [ cases l2 in Hl2; // #hd' #tl' normalize #Habsurd destruct ] #Hl2_empty >Hl2_empty #o #m1 #m2 #_ normalize /2 by ex_intro/ | 2: #hd1 #tl1 #Hind #l2 #Hlength cut (∃hd2,tl2.l2 = hd2::tl2 ∧ |tl1| = |tl2|) [ cases l2 in Hlength; [ normalize #Habsurd destruct | #hd2 #tl2 normalize #H %{hd2} %{tl2} @conj try @refl destruct (H) assumption ] ] * #hd2 * #tl2 * #Heq2 destruct (Heq2) #Hlen_tl #o #m1 #m2 #Hvalid_embed #Hstoren cases (some_inversion ????? Hstoren) #m1_tmp * #Hbestorev #Hstoren_tl lapply (Hvalid_embed (mk_pointer b1 o) (mk_pointer b2 (offset_plus o delta)) ??) [ 1: whd in match (pointer_translation ??); >Hembed normalize nodelta @refl | 2: @(bestorev_to_valid_pointer … Hbestorev) ] #Hvalid_ptr_m2 whd in match (storen ???); lapply (valid_pointer_to_bestorev_ok m2 (mk_pointer b2 (offset_plus o delta)) hd2 Hvalid_ptr_m2) * #m2_tmp #Hbestorev2 >Hbestorev2 normalize nodelta whd in match (shift_pointer ???) in Hstoren_tl ⊢ %; whd in match (shift_offset ???) in Hstoren_tl ⊢ %; (* normalize in match (sign_ext ???) in Hstoren_tl ⊢ %;*) cut (mk_offset (addition_n ? 
(offv (offset_plus o delta)) (sign_ext 2 offset_size (bitvector_of_nat 2 1))) = offset_plus (offset_plus o (mk_offset (sign_ext 2 offset_size (bitvector_of_nat 2 1)))) delta) [ cases o #o' normalize nodelta whd in match (offset_plus ??) in ⊢ (??%%); whd in match (offset_plus ??) in ⊢ (???(?%)); /2 by associative_addition_n, commutative_addition_n, refl/ ] #Heq_cleanup >Heq_cleanup >Heq_cleanup in Hind; #Hind @(Hind … Hstoren_tl) try assumption #p #p' #Hvalid_in_m1_tmp #Hp_embed @valid_pointer_of_Prop lapply (mem_bounds_invariant_after_bestorev … Hbestorev) * * * * #Hnextblock_eq #Hbounds_eq #Hoffset_in_bounds #Hcontents_at_eq #Hcontents_else_eq lapply (mem_bounds_invariant_after_bestorev … Hbestorev2) * * * * #Hnextblock_eq2 #Hbounds_eq2 #Hoffset_in_bounds2 #Hcontents_at_eq2 #Hcontents_else_eq2 cut (valid_pointer m1 p = true) [ @valid_pointer_of_Prop cases (valid_pointer_to_Prop … Hvalid_in_m1_tmp) >Hnextblock_eq cases (Hbounds_eq (pblock p)) #Hlow' #Hhigh' whd in match (low_bound ??); whd in match (high_bound ??); >Hlow' >Hhigh' * /3 by conj/ ] #Hvalid_in_m1 lapply (Hvalid_embed p p' Hvalid_in_m1 Hp_embed) #Hvalid_in_m2 cases (valid_pointer_to_Prop … Hvalid_in_m2) * #Hnextblock2' #Hlow2' #Hhigh2' @conj try @conj >Hnextblock_eq2 try assumption cases (Hbounds_eq2 … (pblock p')) #Hlow2'' #Hhigh2'' whd in match (low_bound ??); whd in match (high_bound ??); >Hlow2'' >Hhigh2'' assumption ] qed. lemma storen_parallel_memory_exists_and_preserves_loads : ∀E,m1,m2. memory_inj E m1 m2 → ∀b1,b2,delta. E b1 = Some ? 〈b2,delta〉 → ∀v1,v2. value_eq E v1 v2 → ∀i,m1',ty. storen m1 (mk_pointer b1 i) (fe_to_be_values ty v1) = Some ? m1'→ ∃m2'. storen m2 (mk_pointer b2 (offset_plus i delta)) (fe_to_be_values ty v2) = Some ? m2' ∧ loadn m2' (mk_pointer b2 (offset_plus i delta)) (|fe_to_be_values ty v2|) = Some ? (fe_to_be_values ty v2). 
#E #m1 #m2 #Hinj #b1 #b2 #delta #Hembed #v1 #v2 #Hvalue_eq #i #m1' #ty #Hstoren cases (storen_parallel_memory_exists … Hinj … Hembed … Hvalue_eq i m1' ty Hstoren) #m2' #Hstoren' %{m2'} @conj try assumption @(storen_loadn_ok … Hstoren') // qed. (* If E ⊢ m1 ↦ m2 *and* if E b1 = 〈b2,delta〉, *and* if we write /A PROPER VALUE/ at 〈b1,i〉 successfuly, *then* we can write /something value_eq-compatible/ in m2 at 〈b2,i+delta〉 successfuly yielding m2' *and* preserve the injection : E ⊢ m1' ↦ m2' N.B.: this result cannot be given at the back-end level : we could overwrite a pointer and break the value_eq correspondence between the memories. *) axiom storen_parallel_aux : ∀E,m1,m2. ∀Hinj:memory_inj E m1 m2. ∀v1,v2. value_eq E v1 v2 → ∀b1,b2,delta. E b1 = Some ? 〈b2, delta〉 → ∀ty,i,m1'. (* ast_typ_consistent_with_value ty v1 → *) storen m1 (mk_pointer b1 i) (fe_to_be_values ty v1) = Some ? m1' → ∃m2'. storen m2 (mk_pointer b2 (offset_plus i delta)) (fe_to_be_values ty v2) = Some ? m2' ∧ memory_inj E m1' m2'. (* This lemma is not provable as in CompCert. In the following chunk of text, I'll try to * explain why, and how we might still be able to prove it given enough time. I'll be refering to a paper by Leroy & Blazy in the J.A.R. In CompCert, when storing some data of size S in some location 〈block, offset〉, what happens is that 1) the memory is extended with a map from 〈block,offset〉 to the actual data 2) the memory inteval from 〈block,offset+1〉 to 〈block,offset+S-1〉 is invalidated, meaning that every load in this interval will fail. This behaviour is documented at page 9 in the aforementioned paper. The memory model of Cerco is in a sense much more realistic. When storing a front-end value fv, the story is the following : 1) the value fv is marshalled into a list of back-end values (byte-sized) which correspond to the actual size of the data in-memory. 2) This list is then stored as-is at the location 〈block, offset〉. Now on to the lemma we want to prove. 
The difficult part is to prove [load_sim E m1' m2'], i.e. we want to prove that ∀c1,off1,c2,off2,delta. s.t. E c1 = 〈c2, off2〉 load_value_of_type m1' c1 off1 = ⌊vA⌋ → load_value_of_type m2' c2 (off1+off2) = ⌊vB⌋ ∧ value_eq E vA vB, where m1' and m2' are the result of storing some values v1 and v2 in resp. m1 and m2 at some resp. locations 〈b1,i〉 and (pointer_translation E b1 i) (cf statement). In CompCert, the proof of this statement goes by case analysis on 〈c1,off1〉. Case 1: 〈b1,i〉 = 〈c1,off1〉 → we read the freshly stored data. Case 2: b1 = c1 but [i; i+|v1|] and [c1, c1+|vA|] describe /disjoint/ areas of the same block. In this case, we can use the lemma store_value_load_disjoint on the hypothesis (load_value_of_type m1' c1 off1 = ⌊vA⌋) yielding (load_value_of_type m1' c1 off1 = load_value_of_type m1 c1 off1) allowing us to use the load_sim hypothesis on (m1, m2) to obtain (load_value_of_type m2 c2 (off1+off2) = ⌊vB⌋) which we can lift back to (load_value_of_type m2' c2 (off1+off2) = ⌊vB⌋) using the disjointness hypothesis contained in the original memory injection [Hinj]. case 4: b1 ≠ c1, nothing difficult case 3: the intervals [i; i+|v1|] and [c1, c1+|vA|] /overlap/ ! in CompCert, the proof follows simply from the fact that the load (load_value_of_type m1' c1 off1) will fail because of invalidated memory, yielding a contradiction. We have no such possibility in Cerco. I see a possible, convoluted way out of this problem. In the case of an overlap, we need to proceed to an inversion on load_value_of_type m1' c1 off1 = ⌊vA⌋ and to actually look at the data being written. If we succeeded in proceeding to this load, this means that the back-end values stored are "consistent". Besides, we need to proceed to a case analysis on what we stored beforehand. A) If we stored an integer . of size 8: this means that the overlap actually includes all of the freshly stored area. This area contains one [BVByte].
If we succeeded in performing an overlapping load, we either overlapped from the beginning, from the end or from both ends of the stored area. In all cases, since this area contains a BVByte, all other offsets MUST contain BVBytes, otherwise we would have a contradiction ... (cumbersome to show but possible) Hence, we can splice the load in 2 or 3 sub-loads : . re-use the Case 2 above (disjoint areas) for the parts of the load that are before and after the stored area . re-use the Case 1 above for the stored area and show the whole successful load . of size 16,32: we have more freedom but only a finite number of overlap possibilities in each case. We can enumerate them and proceed along the same lines as in the 8 bit case. B) If we stored a pointer (of size 2 bytes, necessarily) . the bytes of a pointer are stored in order and (/!\ important /!\) are /numbered/. This means that any successful overlapping load has managed to read a pointer in the wrong order, which yields a contradiction. C) If we stored a Vnull, roughly the same reasoning as in the pointer case D) If we stored a Vundef ... This gets hairy. We must reason on the way fe_to_be_values and be_to_fe_value work. What we want is to show that the load [load_value_of_type m1' c1 off1] yields a Vundef. This can happen in several ways. . If we try to load something of integer type, we will have a Vundef because we can show that some of the back-end values are BVundef (cf [build_integer]) . If we try to load a pointer, it will also fail for the same reason. I'll admit that I'm not too sure about the last case. Also, my reasoning on pointers might fail if we have "longer than 2 bytes" pointers. This was a high-level and sketchy proof, and in the interests of time I decide to axiomatize it. *) (* This is lemma 60 from Leroy&Blazy *) lemma store_value_of_type_parallel : ∀E,m1,m2. ∀Hinj:memory_inj E m1 m2. ∀v1,v2. value_eq E v1 v2 → ∀b1,b2,delta. E b1 = Some ? 〈b2, delta〉 → ∀ty,i,m1'.
(* type_consistent_with_value ty v1 → *) store_value_of_type ty m1 b1 i v1 = Some ? m1' → ∃m2'. store_value_of_type ty m2 b2 (offset_plus i delta) v2 = Some ? m2' ∧ memory_inj E m1' m2'. #E #m1 #m2 #Hinj #v1 #v2 #Hvalue_eq #b1 #b2 #delta #Hembed #ty #i #m1' cases ty [ | #sz' #sg' | #ptr_ty' | #array_ty' #array_sz' | #domain' #codomain' | #structname' #fieldspec' | #unionname' #fieldspec' | #id' ] whd in ⊢ ((??%?) → ?); [ 1,4,5,6,7: #Habsurd destruct ] whd in match (store_value_of_type ?????); @(storen_parallel_aux … Hinj … Hembed) // qed. lemma store_value_of_type_load_sim : ∀E,m1,m2,m1'. ∀Hinj:memory_inj E m1 m2. ∀ty,b,i,v. E b = None ? → store_value_of_type ty m1 b i v = Some ? m1' → load_sim_ptr E m1' m2. #E #m1 #m2 #m1' #Hinj #ty #b #i #v #Hembed #Hstore cases Hinj #Hsim #Hfreeblock #Hvalid #Hnodangling #Hregion_eq #Hnonalias #Himpl cases ty in Hstore; [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] [ 1,4,5,6,7: #Heq normalize in Heq; destruct ] #Hstore whd #b1 #off1 #b2 #off2 #ty' #v1 #Hvalid #Hembed' #Hload lapply (store_value_of_type_load_value_of_type_conservation … Hstore b1 off1 ? ty') [ 1,3,5: @(eq_block_elim … b b1) try // #Heq >Heq in Hembed; #Hembed whd in Hembed':(??%?); >Hembed in Hembed'; #H normalize in H; destruct ] #Heq H1 @H | #p #p' #Hvalid @(mi_valid_pointers ??? Hinj) @valid_pointer_of_Prop lapply (valid_pointer_to_Prop … Hvalid) >H1 cases (H2 (pblock p)) #HA #HB >unfold_high_bound >unfold_low_bound >unfold_high_bound >unfold_low_bound >HA >HB // | @(mi_nodangling … Hinj) | @(mi_region … Hinj) | whd #b1 #b2 #b1' #b2' #delta1 #delta2 #Hneq #Hembed1 #Hembed2 whd in match (block_is_empty ??); whd in match (block_is_empty m1' b2); >unfold_high_bound >unfold_low_bound >unfold_high_bound >unfold_low_bound cases (H2 b1) #HA #HB cases (H2 b2) #HC #HD >HA >HB >HC >HD @(mi_nonalias ??? Hinj) assumption | #bb whd lapply (mi_nowrap ??? 
Hinj bb) whd in ⊢ (% → ?); cases (E bb) normalize nodelta try // * #bb' #off normalize nodelta whd in match (block_implementable_bv ??); whd in match (block_implementable_bv m2 bb'); whd in match (block_implementable_bv m1' bb); >unfold_high_bound >unfold_low_bound >unfold_high_bound >unfold_low_bound >unfold_high_bound cases (H2 bb) #HA #HB >HA >HB // ] ] qed. (* ------------------------------------------------------------------------- *) (* Commutation results of standard unary and binary operations with value_eq. *) lemma unary_operation_value_eq : ∀E,op,v1,v2,ty. value_eq E v1 v2 → ∀r1. sem_unary_operation op v1 ty = Some ? r1 → ∃r2.sem_unary_operation op v2 ty = Some ? r2 ∧ value_eq E r1 r2. #E #op #v1 #v2 #ty #Hvalue_eq #r1 inversion Hvalue_eq [ 1: #v #Hv1 #Hv2 destruct cases op normalize [ 1: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] normalize #Habsurd destruct (Habsurd) | 3: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] normalize #Habsurd destruct (Habsurd) | 2: #Habsurd destruct (Habsurd) ] | 2: #vsz #i #Hv1 #Hv2 #_ cases op [ 1: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] whd in ⊢ ((??%?) → ?); whd in match (sem_unary_operation ???); [ 2: cases (eq_intsize sz vsz) normalize nodelta #Heq1 destruct %{(of_bool (eq_bv (bitsize_of_intsize vsz) i (zero (bitsize_of_intsize vsz))))} cases (eq_bv (bitsize_of_intsize vsz) i (zero (bitsize_of_intsize vsz))) @conj // | *: #Habsurd destruct (Habsurd) ] | 2: whd in match (sem_unary_operation ???); #Heq1 destruct %{(Vint vsz (exclusive_disjunction_bv (bitsize_of_intsize vsz) i (mone vsz)))} @conj // | 3: whd in match (sem_unary_operation ???); cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] normalize nodelta whd in ⊢ ((??%?) 
→ ?); [ 2: cases (eq_intsize sz vsz) normalize nodelta #Heq1 destruct %{(Vint vsz (two_complement_negation (bitsize_of_intsize vsz) i))} @conj // | *: #Habsurd destruct (Habsurd) ] ] | 3: #Hv1 #Hv2 #_ destruct whd in match (sem_unary_operation ???); cases op normalize nodelta [ 1: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] whd in match (sem_notbool ??); #Heq1 destruct /3 by ex_intro, vint_eq, conj/ | 2: normalize #Habsurd destruct (Habsurd) | 3: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] whd in match (sem_neg ??); #Heq1 destruct ] | 4: #p1 #p2 #Hptr_translation #Hv1 #Hv2 #_ whd in match (sem_unary_operation ???); cases op normalize nodelta [ 1: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] whd in match (sem_notbool ??); #Heq1 destruct /3 by ex_intro, vint_eq, conj/ | 2: normalize #Habsurd destruct (Habsurd) | 3: cases ty [ | #sz #sg | #ty | #ty #n | #tl #ty | #id #fl | #id #fl | #ty ] whd in match (sem_neg ??); #Heq1 destruct ] ] qed. (* value_eq lifts to addition *) lemma add_value_eq : ∀E,v1,v2,v1',v2',ty1,ty2. value_eq E v1 v2 → value_eq E v1' v2' → (* memory_inj E m1 m2 → This injection seems useless TODO *) ∀r1. (sem_add v1 ty1 v1' ty2=Some val r1→ ∃r2:val.sem_add v2 ty1 v2' ty2=Some val r2∧value_eq E r1 r2). 
#E #v1 #v2 #v1' #v2' #ty1 #ty2 #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ 1: | 2: #sz #i | 3: | 4: #p1 #p2 #Hembed ] [ 1: whd in match (sem_add ????); normalize nodelta cases (classify_add ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #n #ty #sz #sg | 3: #n #sz #sg #ty | 4: #ty1' #ty2' ] #Habsurd destruct (Habsurd) | 2: whd in match (sem_add ????); whd in match (sem_add ????); normalize nodelta cases (classify_add ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 2,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1,5: | 2,6: #sz' #i' | 3,7: | 4,8: #p1' #p2' #Hembed' ] [ 1,2,4,5,7: #Habsurd destruct (Habsurd) ] [ 1: @intsize_eq_elim_elim [ 1: #_ #Habsurd destruct (Habsurd) | 2: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) /3 by ex_intro, conj, vint_eq/ ] | 2: @eq_bv_elim normalize nodelta #Heq1 #Heq2 destruct /3 by ex_intro, conj, vint_eq/ | 3: #Heq destruct (Heq) normalize in Hembed'; elim p1' in Hembed'; #b1' #o1' normalize nodelta #Hembed @(ex_intro … (conj … (refl …) ?)) @vptr_eq whd in match (pointer_translation ??); cases (E b1') in Hembed; [ 1: normalize in ⊢ (% → ?); #Habsurd destruct (Habsurd) | 2: * #block #offset normalize nodelta #Heq destruct (Heq) whd in match (shift_pointer_n ????); cut (offset_plus (shift_offset_n (bitsize_of_intsize sz) o1' (sizeof ty) tsg i) offset = (shift_offset_n (bitsize_of_intsize sz) (mk_offset (addition_n ? (offv o1') (offv offset))) (sizeof ty) tsg i)) [ 1: whd in match (offset_plus ??); whd in match (shift_offset_n ????) 
in ⊢ (??%%); >commutative_addition_n >associative_addition_n <(commutative_addition_n … (offv offset) (offv o1')) @refl ] #Heq >Heq @refl ] ] (* | 3: whd in match (sem_add ????); whd in match (sem_add ????); normalize nodelta cases (classify_add ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1: | 2: #sz' #i'| 3: | 4: #p1' #p2' #Hembed' ] [ 1,3,4,5,7: #Habsurd destruct (Habsurd) ] #Heq >Heq %{r1} @conj // /3 by ex_intro, conj, vfloat_eq/ *) | 3: whd in match (sem_add ????); whd in match (sem_add ????); normalize nodelta cases (classify_add ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1: | 2: #sz' #i' | 3: | 4: #p1' #p2' #Hembed' ] [ 1,3,4,5: #Habsurd destruct (Habsurd) ] @eq_bv_elim [ 1: normalize nodelta #Heq1 #Heq2 destruct /3 by ex_intro, conj, vnull_eq/ | 2: #_ normalize nodelta #Habsurd destruct (Habsurd) ] | 4: whd in match (sem_add ????); whd in match (sem_add ????); normalize nodelta cases (classify_add ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1: | 2: #sz' #i' | 3: | 4: #p1' #p2' #Hembed' ] [ 1,3,4,5: #Habsurd destruct (Habsurd) ] #Heq destruct (Heq) @(ex_intro … (conj … (refl …) ?)) @vptr_eq whd in match (pointer_translation ??) in Hembed ⊢ %; elim p1 in Hembed; #b1 #o1 normalize nodelta cases (E b1) [ 1: normalize in ⊢ (% → ?); #Habsurd destruct (Habsurd) | 2: * #block #offset normalize nodelta #Heq destruct (Heq) whd in match (shift_pointer_n ????); whd in match (shift_offset_n ????) 
in ⊢ (??%%); whd in match (offset_plus ??); whd in match (offset_plus ??); >commutative_addition_n >(associative_addition_n … offset_size ?) >(commutative_addition_n ? (offv offset) ?) @refl ] ] qed. lemma subtraction_delta : ∀x,y,delta. subtraction offset_size (addition_n offset_size x delta) (addition_n offset_size y delta) = subtraction offset_size x y. #x #y #delta whd in match subtraction; normalize nodelta (* Remove all the equal parts on each side of the equation. *) two_complement_negation_plus <(commutative_addition_n … (two_complement_negation ? delta)) >(associative_addition_n ? delta) >bitvector_opp_addition_n >(commutative_addition_n ? (zero ?)) >neutral_addition_n @refl qed. (* Offset subtraction is invariant by translation *) lemma sub_offset_translation : ∀n,x,y,delta. sub_offset n x y = sub_offset n (offset_plus x delta) (offset_plus y delta). #n #x #y #delta whd in match (sub_offset ???) in ⊢ (??%%); elim x #x' elim y #y' elim delta #delta' whd in match (offset_plus ??); whd in match (offset_plus ??); >subtraction_delta @refl qed. (* value_eq lifts to subtraction *) lemma sub_value_eq : ∀E,v1,v2,v1',v2',ty1,ty2,target. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_sub v1 ty1 v1' ty2 target=Some val r1→ ∃r2:val.sem_sub v2 ty1 v2' ty2 target=Some val r2∧value_eq E r1 r2). 
(* Proof of sub_value_eq: invert value_eq on the first operand (cases are
 * Vundef / Vint / Vnull / Vptr), then case-split on classify_sub.
 * Case 2 handles integer subtraction (vint_eq); case 3 handles Vnull;
 * case 4 handles pointer - integer and pointer - pointer subtraction. *)
#E #v1 #v2 #v1' #v2' #ty1 #ty2 #target #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ | #sz #i | 3: | 4: #p1 #p2 #Hembed ] [ 1: whd in match (sem_sub ?????); normalize nodelta cases (classify_sub ty1 ty2) normalize nodelta [ #sz #sg | #n #ty #sz #sg | #n #sz #sg #ty |#ty1' #ty2' ] #Habsurd destruct (Habsurd) | 2: whd in match (sem_sub ?????); whd in match (sem_sub ?????); normalize nodelta cases (classify_sub ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 2,3,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] @intsize_eq_elim_elim [ 1: #_ #Habsurd destruct (Habsurd) | 2: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) /3 by ex_intro, conj, vint_eq/ ] (*| 3: whd in match (sem_sub ????); whd in match (sem_sub ????); normalize nodelta cases (classify_sub ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 1,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 1,2,4,5: #Habsurd destruct (Habsurd) ] #Heq destruct (Heq) /3 by ex_intro, conj, vfloat_eq/ *)
(* Case 3: first operand is Vnull. *)
| 3: whd in match (sem_sub ?????); whd in match (sem_sub ?????); normalize nodelta cases (classify_sub ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 1,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1,5: | 2,6: #sz' #i' | 3,7: | 4,8: #p1' #p2' #Hembed' ] [ 1,2,4,5,7,8: #Habsurd destruct (Habsurd) ] [ 1: @eq_bv_elim [ 1: normalize nodelta #Heq1 #Heq2 destruct /3 by ex_intro, conj, vnull_eq/ | 2: #_ normalize nodelta #Habsurd destruct (Habsurd) ] | 2: cases target [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ]
normalize nodelta #Heq destruct (Heq) /3 by ex_intro, conj, vnull_eq/ ]
(* Case 4: first operand is Vptr. Sub-case 1 is pointer - integer (unfold
 * neg_shift_pointer_n and close by bitvector algebra); sub-case 2 is
 * pointer - pointer within the same block, where sub_offset_translation
 * shows the offset difference is unchanged by the injection's shift. *)
| 4: whd in match (sem_sub ?????); whd in match (sem_sub ?????); normalize nodelta cases (classify_sub ty1 ty2) normalize nodelta [ 1: #tsz #tsg | 2: #tn #ty #tsz #tsg | 3: #tn #tsz #tsg #ty | 4: #ty1' #ty2' ] [ 1,4: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1,5: | 2,6: #sz' #i' | 3,7: | 4,8: #p1' #p2' #Hembed' ] [ 1,2,4,5,6,7: #Habsurd destruct (Habsurd) ] #Heq destruct (Heq) [ 1: @(ex_intro … (conj … (refl …) ?)) @vptr_eq whd in match (pointer_translation ??) in Hembed ⊢ %; elim p1 in Hembed; #b1 #o1 normalize nodelta cases (E b1) [ 1: normalize in ⊢ (% → ?); #Habsurd destruct (Habsurd) | 2: * #block #offset normalize nodelta #Heq destruct (Heq) whd in match (offset_plus ??) in ⊢ (??%%); whd in match (neg_shift_pointer_n ?????) in ⊢ (??%%); whd in match (neg_shift_offset_n ?????) in ⊢ (??%%); whd in match (subtraction) in ⊢ (??%%); normalize nodelta generalize in match (short_multiplication ???); #mult /3 by associative_addition_n, commutative_addition_n, refl/ ]
| 2: lapply Heq @eq_block_elim [ 2: #_ normalize nodelta #Habsurd destruct (Habsurd) | 1: #Hpblock1_eq normalize nodelta elim p1 in Hpblock1_eq Hembed Hembed'; #b1 #off1 elim p1' #b1' #off1' whd in ⊢ (% → % → ?); #Hpblock1_eq destruct (Hpblock1_eq) whd in ⊢ ((??%?) → (??%?) → ?); cases (E b1') normalize nodelta [ 1: #Habsurd destruct (Habsurd) ] * #dest_block #dest_off normalize nodelta #Heq_ptr1 #Heq_ptr2 destruct >eq_block_b_b normalize nodelta cases (eqb (sizeof tsg) O) normalize nodelta [ 1: #Habsurd destruct (Habsurd) | 2: cases target [ | #sz #sg | #ptr_ty | #array_ty #array_sz | #domain #codomain | #structname #fieldspec | #unionname #fieldspec | #id ] normalize nodelta #Heq destruct (Heq) <(sub_offset_translation 32 off1 off1' dest_off) cases (division_u ? (sub_offset ?
off1 off1') (repr (sizeof tsg))) in Heq; [ 1: normalize nodelta #Habsurd destruct (Habsurd) | 2: #r1' cases sg normalize nodelta #Heq2 destruct (Heq2) /3 by ex_intro, conj, vint_eq/ ] ] ] ] ] qed.

(* value_eq is preserved by multiplication: only the integer/integer case of
 * classify_aop can succeed, all other value shapes are refuted. *)
lemma mul_value_eq : ∀E,v1,v2,v1',v2',ty1,ty2. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_mul v1 ty1 v1' ty2=Some val r1→ ∃r2:val.sem_mul v2 ty1 v2' ty2=Some val r2∧value_eq E r1 r2). #E #v1 #v2 #v1' #v2' #ty1 #ty2 #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ | #sz #i | 3: | 4: #p1 #p2 #Hembed ] [ 1: whd in match (sem_mul ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] #Habsurd destruct (Habsurd) | 2: whd in match (sem_mul ????); whd in match (sem_mul ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] [ 2: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] @intsize_eq_elim_elim [ 1: #_ #Habsurd destruct (Habsurd) | 2: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) /3 by ex_intro, conj, vint_eq/ ] | 3: whd in match (sem_mul ????); whd in match (sem_mul ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] [ 1,2: #Habsurd destruct (Habsurd) ] | 4: whd in match (sem_mul ????); whd in match (sem_mul ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] #Habsurd destruct (Habsurd) ] qed.

(* value_eq is preserved by division (signed and unsigned). *)
lemma div_value_eq : ∀E,v1,v2,v1',v2',ty1,ty2. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_div v1 ty1 v1' ty2=Some val r1→ ∃r2:val.sem_div v2 ty1 v2' ty2=Some val r2∧value_eq E r1 r2).
(* Proof of div_value_eq: invert value_eq on both operands; only the
 * Vint/Vint case under an integer classify_aop can succeed. The signed
 * (division_s) and unsigned (division_u) sub-cases are both closed by
 * case analysis on the optional division result, ending with vint_eq. *)
#E #v1 #v2 #v1' #v2' #ty1 #ty2 #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ | #sz #i | 3: | 4: #p1 #p2 #Hembed ] [ 1: whd in match (sem_div ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] #Habsurd destruct (Habsurd) | 2: whd in match (sem_div ????); whd in match (sem_div ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] [ 2: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] elim sg normalize nodelta @intsize_eq_elim_elim [ 1,3: #_ #Habsurd destruct (Habsurd) | 2,4: #Heq destruct (Heq) normalize nodelta @(match (division_s (bitsize_of_intsize sz') i i') with [ None ⇒ ? | Some bv' ⇒ ? ]) [ 1: normalize #Habsurd destruct (Habsurd) | 2: normalize #Heq destruct (Heq) /3 by ex_intro, conj, vint_eq/ | 3,4: elim sz' in i' i; #i' #i normalize in match (pred_size_intsize ?); generalize in match division_u; #division_u normalize @(match (division_u ???) with [ None ⇒ ? | Some bv ⇒ ?]) normalize nodelta #H destruct (H) /3 by ex_intro, conj, vint_eq/ ] ] | 3: whd in match (sem_div ????); whd in match (sem_div ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] [ 1,2: #Habsurd destruct (Habsurd) ] | 4: whd in match (sem_div ????); whd in match (sem_div ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] #Habsurd destruct (Habsurd) ] qed.

(* value_eq is preserved by modulus (signed and unsigned). *)
lemma mod_value_eq : ∀E,v1,v2,v1',v2',ty1,ty2. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_mod v1 ty1 v1' ty2=Some val r1→ ∃r2:val.sem_mod v2 ty1 v2' ty2=Some val r2∧value_eq E r1 r2).
(* Proof follows the same pattern as div_value_eq, with modulus_s/modulus_u
 * in place of division_s/division_u; the final catch-all case refutes the
 * remaining value shapes at once. *)
#E #v1 #v2 #v1' #v2' #ty1 #ty2 #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ | #sz #i | 3: | 4: #p1 #p2 #Hembed ] [ 1: whd in match (sem_mod ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] #Habsurd destruct (Habsurd) | 2: whd in match (sem_mod ????); whd in match (sem_mod ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] [ 2: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] elim sg normalize nodelta @intsize_eq_elim_elim [ 1,3: #_ #Habsurd destruct (Habsurd) | 2,4: #Heq destruct (Heq) normalize nodelta @(match (modulus_s (bitsize_of_intsize sz') i i') with [ None ⇒ ? | Some bv' ⇒ ? ]) [ 1: normalize #Habsurd destruct (Habsurd) | 2: normalize #Heq destruct (Heq) /3 by ex_intro, conj, vint_eq/ | 3,4: elim sz' in i' i; #i' #i generalize in match modulus_u; #modulus_u normalize @(match (modulus_u ???) with [ None ⇒ ? | Some bv ⇒ ?]) normalize nodelta #H destruct (H) /3 by ex_intro, conj, vint_eq/ ] ] | *: whd in match (sem_mod ????); whd in match (sem_mod ????); normalize nodelta cases (classify_aop ty1 ty2) normalize nodelta #foo #bar #Habsurd destruct (Habsurd) ] qed.

(* boolean ops *)
(* value_eq is preserved by bitwise and: only Vint/Vint succeeds. *)
lemma and_value_eq : ∀E,v1,v2,v1',v2'. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_and v1 v1'=Some val r1→ ∃r2:val.sem_and v2 v2'=Some val r2∧value_eq E r1 r2).
(* Proof of and_value_eq: the only successful case is Vint/Vint with equal
 * sizes (intsize_eq_elim_elim); all other shapes make sem_and reduce to
 * None, refuted by destruct. The or/xor lemmas below are verbatim copies
 * of the same script with sem_or/sem_xor substituted. *)
#E #v1 #v2 #v1' #v2' #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ 2: #sz #i @(value_eq_inversion E … Hvalue_eq2) [ 2: #sz' #i' whd in match (sem_and ??); @intsize_eq_elim_elim [ 1: #_ #Habsurd destruct (Habsurd) | 2: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) /3 by ex_intro,conj,vint_eq/ ] ] ] normalize in match (sem_and ??); #arg1 destruct normalize in match (sem_and ??); #arg2 destruct normalize in match (sem_and ??); #arg3 destruct normalize in match (sem_and ??); #arg4 destruct qed.

(* value_eq is preserved by bitwise or. *)
lemma or_value_eq : ∀E,v1,v2,v1',v2'. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_or v1 v1'=Some val r1→ ∃r2:val.sem_or v2 v2'=Some val r2∧value_eq E r1 r2). #E #v1 #v2 #v1' #v2' #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ 2: #sz #i @(value_eq_inversion E … Hvalue_eq2) [ 2: #sz' #i' whd in match (sem_or ??); @intsize_eq_elim_elim [ 1: #_ #Habsurd destruct (Habsurd) | 2: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) /3 by ex_intro,conj,vint_eq/ ] ] ] normalize in match (sem_or ??); #arg1 destruct normalize in match (sem_or ??); #arg2 destruct normalize in match (sem_or ??); #arg3 destruct normalize in match (sem_or ??); #arg4 destruct qed.

(* value_eq is preserved by bitwise xor. *)
lemma xor_value_eq : ∀E,v1,v2,v1',v2'. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_xor v1 v1'=Some val r1→ ∃r2:val.sem_xor v2 v2'=Some val r2∧value_eq E r1 r2). #E #v1 #v2 #v1' #v2' #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ 2: #sz #i @(value_eq_inversion E … Hvalue_eq2) [ 2: #sz' #i' whd in match (sem_xor ??); @intsize_eq_elim_elim [ 1: #_ #Habsurd destruct (Habsurd) | 2: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) /3 by ex_intro,conj,vint_eq/ ] ] ] normalize in match (sem_xor ??); #arg1 destruct normalize in match (sem_xor ??); #arg2 destruct normalize in match (sem_xor ??); #arg3 destruct normalize in match (sem_xor ??); #arg4 destruct qed.

(* value_eq is preserved by left shift: success requires Vint/Vint and an
 * in-range shift amount (the lt_u guard). *)
lemma shl_value_eq : ∀E,v1,v2,v1',v2'. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1.
(sem_shl v1 v1'=Some val r1→ ∃r2:val.sem_shl v2 v2'=Some val r2∧value_eq E r1 r2). #E #v1 #v2 #v1' #v2' #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ | #sz #i | 3: | 4: #p1 #p2 #Hembed ] [ 2: @(value_eq_inversion E … Hvalue_eq2) [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 2: whd in match (sem_shl ??); cases (lt_u ???) normalize nodelta [ 1: #Heq destruct (Heq) /3 by ex_intro,conj,vint_eq/ | 2: #Habsurd destruct (Habsurd) ] | *: whd in match (sem_shl ??); #Habsurd destruct (Habsurd) ] | *: whd in match (sem_shl ??); #Habsurd destruct (Habsurd) ] qed.

(* value_eq is preserved by right shift (signed/unsigned per classify_aop). *)
lemma shr_value_eq : ∀E,v1,v2,v1',v2',ty,ty'. value_eq E v1 v2 → value_eq E v1' v2' → ∀r1. (sem_shr v1 ty v1' ty'=Some val r1→ ∃r2:val.sem_shr v2 ty v2' ty'=Some val r2∧value_eq E r1 r2). #E #v1 #v2 #v1' #v2' #ty #ty' #Hvalue_eq1 #Hvalue_eq2 #r1 @(value_eq_inversion E … Hvalue_eq1) [ | #sz #i | 3: | 4: #p1 #p2 #Hembed ] whd in match (sem_shr ????); whd in match (sem_shr ????); [ 1: cases (classify_aop ty ty') normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] #Habsurd destruct (Habsurd) | 2: cases (classify_aop ty ty') normalize nodelta [ 1: #sz #sg | 2: #ty1' #ty2' ] [ 2: #Habsurd destruct (Habsurd) ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ | #sz' #i' | | #p1' #p2' #Hembed' ] [ 1,3,4: #Habsurd destruct (Habsurd) ] elim sg normalize nodelta cases (lt_u ???) normalize nodelta #Heq destruct (Heq) /3 by ex_intro, conj, refl, vint_eq/ | *: cases (classify_aop ty ty') normalize nodelta #foo #bar #Habsurd destruct (Habsurd) ] qed.

(* Equality comparison of offsets is invariant under translating both
 * offsets by the same delta. *)
lemma eq_offset_translation : ∀delta,x,y. cmp_offset Ceq (offset_plus x delta) (offset_plus y delta) = cmp_offset Ceq x y.
(* Proof of eq_offset_translation: unfold to eq_bv on the underlying
 * bitvectors; the non-equal case uses injectivity of addition_n
 * (injective_inv_addition_n) to rule out accidental collisions. *)
* #delta * #x * #y whd in match (offset_plus ??); whd in match (offset_plus ??); whd in match cmp_offset; normalize nodelta whd in match eq_offset; normalize nodelta @(eq_bv_elim … x y) [ 1: #Heq >Heq >eq_bv_true @refl | 2: #Hneq lapply (injective_inv_addition_n … x y delta Hneq) @(eq_bv_elim … (addition_n offset_size x delta) (addition_n offset_size y delta)) [ 1: #H * #H' @(False_ind … (H' H)) | 2: #_ #_ @refl ] ] qed.

(* Disequality comparison of offsets is likewise invariant by translation;
 * the script is identical to eq_offset_translation's. *)
lemma neq_offset_translation : ∀delta,x,y. cmp_offset Cne (offset_plus x delta) (offset_plus y delta) = cmp_offset Cne x y. * #delta * #x * #y whd in match (offset_plus ??); whd in match (offset_plus ??); whd in match cmp_offset; normalize nodelta whd in match eq_offset; normalize nodelta @(eq_bv_elim … x y) [ 1: #Heq >Heq >eq_bv_true @refl | 2: #Hneq lapply (injective_inv_addition_n … x y delta Hneq) @(eq_bv_elim … (addition_n offset_size x delta) (addition_n offset_size y delta)) [ 1: #H * #H' @(False_ind … (H' H)) | 2: #_ #_ @refl ] ] qed.

(* Note that x and y are bounded below and above, similarly for the shifted offsets. This is enough to prove that there is no "wrap-around-the-end" problem, /provided we know that the block bounds are implementable by 2^16 bitvectors/. We axiomatize it for now. *)
axiom cmp_offset_translation : ∀op,delta,x,y. (Zoo x) + (Zoo delta) < Z_two_power_nat 16 → (Zoo y) + (Zoo delta) < Z_two_power_nat 16 → cmp_offset op x y = cmp_offset op (offset_plus x delta) (offset_plus y delta).

(* Any offset valid inside a 2^16-implementable block is itself < 2^16. *)
lemma valid_pointer_of_implementable_block_is_implementable : ∀m,b. block_implementable_bv m b → ∀o. valid_pointer m (mk_pointer b o) = true → Zoo o < Z_two_power_nat 16. #m #b whd in ⊢ (% → ?); * #Hlow #Hhigh * #o #Hvalid cases (valid_pointer_to_Prop … Hvalid) * #_ #Hlow' #Hhigh' cases Hlow -Hlow #Hlow0 #Hlow16 cases Hhigh -Hhigh #Hhigh0 #Hhigh16 whd in match (Zoo ?); @(transitive_Zlt … Hhigh' Hhigh16) qed.

(* value_eq is preserved by comparisons, given a memory injection E between
 * m1 and m2 (needed for the pointer cases, which consult pointer validity). *)
lemma cmp_value_eq : ∀E,v1,v2,v1',v2',ty,ty',m1,m2. value_eq E v1 v2 → value_eq E v1' v2' → memory_inj E m1 m2 → ∀op,r1.
(sem_cmp op v1 ty v1' ty' m1 = Some val r1 → ∃r2:val.sem_cmp op v2 ty v2' ty' m2 = Some val r2∧value_eq E r1 r2).
(* Proof start: destructure both memories, then case on classify_cmp.
 * Case 1 is the integer comparison (cmp_int/cmpu_int, closed with vint_eq);
 * case 2 is the pointer comparison, where mi_valid_pointers transports
 * validity of p1/q1 in m1 to their images p2/q2 in m2. *)
#E #v1 #v2 #v1' #v2' #ty #ty' #m1 #m2 #Hvalue_eq1 #Hvalue_eq2 #Hinj #op #r1 elim m1 in Hinj; #contmap1 #nextblock1 #Hnextblock1 elim m2 #contmap2 #nextblock2 #Hnextblock2 #Hinj whd in match (sem_cmp ??????) in ⊢ ((??%?) → %); cases (classify_cmp ty ty') normalize nodelta [ 1: #tsz #tsg @(value_eq_inversion E … Hvalue_eq1) normalize nodelta [ 1: #Habsurd destruct (Habsurd) | 3: #Habsurd destruct (Habsurd) | 4: #p1 #p2 #Hembed #Habsurd destruct (Habsurd) ] #sz #i @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1: #Habsurd destruct (Habsurd) | 3: #Habsurd destruct (Habsurd) | 4: #p1 #p2 #Hembed #Habsurd destruct (Habsurd) ] #sz' #i' cases tsg normalize nodelta @intsize_eq_elim_elim [ 1,3: #Hneq #Habsurd destruct (Habsurd) | 2,4: #Heq destruct (Heq) normalize nodelta #Heq destruct (Heq) [ 1: cases (cmp_int ????) whd in match (of_bool ?); | 2: cases (cmpu_int ????) whd in match (of_bool ?); ] /3 by ex_intro, conj, vint_eq/ ] | 3: #ty1 #ty2 #Habsurd destruct (Habsurd) | 2: lapply Hinj -Hinj generalize in match (mk_mem contmap1 ??); #m1 generalize in match (mk_mem contmap2 ??); #m2 #Hinj #optn #typ @(value_eq_inversion E … Hvalue_eq1) normalize nodelta [ 1: #Habsurd destruct (Habsurd) | 2: #sz #i #Habsurd destruct (Habsurd) | 4: #p1 #p2 #Hembed ] @(value_eq_inversion E … Hvalue_eq2) normalize nodelta [ 1,5: #Habsurd destruct (Habsurd) | 2,6: #sz #i #Habsurd destruct (Habsurd) | 4,8: #q1 #q2 #Hembed' ] [ 2,3: cases op whd in match (sem_cmp_mismatch ?); #Heq destruct (Heq) [ 1,3: %{Vfalse} @conj try @refl @vint_eq | 2,4: %{Vtrue} @conj try @refl @vint_eq ] | 4: cases op whd in match (sem_cmp_match ?); #Heq destruct (Heq) [ 2,4: %{Vfalse} @conj try @refl @vint_eq | 1,3: %{Vtrue} @conj try @refl @vint_eq ] ] #Hvalid lapply (if_opt_inversion ????
(* Tail of the cmp_value_eq proof: the pointer/pointer case. Same-block
 * comparisons are reduced to cmp_offset_translation (offsets shifted by the
 * injection delta compare identically); different-block comparisons rely on
 * block disjointness to show the translated offsets cannot collide. *)
Hvalid) -Hvalid * #Hconj lapply (andb_inversion … Hconj) -Hconj * #Hvalid #Hvalid' lapply (mi_valid_pointers … Hinj q1 q2 Hvalid' Hembed') #Hvalid2' >Hvalid2' lapply (mi_valid_pointers … Hinj p1 p2 Hvalid Hembed) #Hvalid2 >Hvalid2 normalize nodelta (* >(mi_valid_pointers … Hinj p1' p2' Hvalid' Hembed') >(mi_valid_pointers … Hinj p1 p2 Hvalid Hembed) normalize nodelta *) elim p1 in Hvalid Hembed; #bp1 #op1 elim q1 in Hvalid' Hembed'; #bq1 #oq1 #Hvalid' #Htmp #Hvalid lapply Htmp -Htmp whd in match (pointer_translation ??); whd in match (pointer_translation ??); @(eq_block_elim … bp1 bq1) [ 1: #Heq destruct (Heq) normalize nodelta lapply (mi_nowrap … Hinj bq1) whd in ⊢ (% → ?); cases (E bq1) normalize nodelta [ #_ #Habsurd destruct ] * #BLO #OFF normalize nodelta * * #Hbq1_implementable #HBLO_implementable #Hno_wrap #Heq1 #Heq2 #Heq3 destruct >eq_block_b_b normalize nodelta lapply (cmp_offset_translation op OFF op1 oq1 ??) [ @(Zlt_sum_weaken … Hno_wrap) cases (valid_pointer_to_Prop … Hvalid') * #_ #_ try // | @(Zlt_sum_weaken … Hno_wrap) cases (valid_pointer_to_Prop … Hvalid) * #_ #_ try // ] #Heq
(* NOTE(review): the script below looks garbled — '#Heq' is immediately
 * followed by '(not_eq_block … H)' with no rewrite marker or case bracket,
 * and hypotheses BLOq/Hnowrap1/Hnowrap2 referenced later are never
 * introduced here; some text was likely lost. Verify against the original
 * repository sources before relying on this proof. *)
(not_eq_block … H) normalize nodelta %{Vfalse} @conj try @refl %2 | 4: #_ >(not_eq_block … H) normalize nodelta >(not_eq_block … H) normalize nodelta %{Vtrue} @conj try @refl %2 ] destruct (H) generalize in match BLOq in Hnowrap1 Hnowrap2 Hvalid2 Hvalid2' ⊢ %; #target_block * * #Himplq1 #Himpltarget #Hnowrap_q1 * * #Himplp1 #_ #Hnowrap_p1 #Hvalid2 #Hvalid2' lapply (valid_pointer_to_Prop … Hvalid) lapply (valid_pointer_to_Prop … Hvalid') * * #_ #Hlowq #Hhighq * * #_ #Hlowp #Hhighp >eq_block_b_b normalize nodelta * [ 2,4: whd in ⊢ (% → ?); #Habsurd @False_ind cases (valid_pointer_to_Prop … Hvalid') * #_ #Hle #Hlt lapply (Zlt_to_Zle_to_Zlt … Hlt Habsurd) #Hlt' lapply (Zlt_to_Zle_to_Zlt … Hlt' Hle) #Hlt_refl @(absurd … Hlt_refl (irreflexive_Zlt ?)) ] * [ 2,4: whd in ⊢ (% → ?); #Habsurd @False_ind cases (valid_pointer_to_Prop … Hvalid) * #_ #Hle #Hlt lapply (Zlt_to_Zle_to_Zlt …
Hlt Habsurd) #Hlt' lapply (Zlt_to_Zle_to_Zlt … Hlt' Hle) #Hlt_refl @(absurd … Hlt_refl (irreflexive_Zlt ?)) ] * #Hdisjoint whd in match (cmp_offset); normalize nodelta whd in match (eq_offset); normalize nodelta whd in match (offset_plus ??); whd in match (offset_plus ??); lapply (valid_pointer_of_implementable_block_is_implementable … Himpltarget … Hvalid2) lapply (valid_pointer_of_implementable_block_is_implementable … Himpltarget … Hvalid2') #Himpl1 #Himpl2 [ 1,3: (* We show the non-equality of the rhs by exhibiting disjointness of blocks. This is made * painful by invariants on non-overflowing offsets. We exhibit [a]+[b] < [c]+[d], then * cut [a+b]<[c+d] (using a subcut for needed consistency hypotheses) and conclude easily * [a+b] ≠ [c+d]. *) cut ((Z_of_unsigned_bitvector ? (offv op1)) + (Z_of_unsigned_bitvector ? (offv DELTAp)) < (Z_of_unsigned_bitvector ? (offv oq1)) + (Z_of_unsigned_bitvector ? (offv DELTAq))) [ 1,3: @(Zlt_to_Zle_to_Zlt ? (high_bound m1 bp1 + Zoo DELTAp) ?) [ 1,3: @Zlt_translate assumption | 2,4: @(transitive_Zle ? (low_bound m1 bq1+Zoo DELTAq) ?) try assumption @monotonic_Zle_Zplus_l try assumption ] ] #Hlt_stepA cut (Z_of_unsigned_bitvector ? (addition_n offset_size (offv op1) (offv DELTAp)) < Z_of_unsigned_bitvector ? (addition_n offset_size (offv oq1) (offv DELTAq))) [ 1,3: >Z_addition_bv_commute [ 2,4: @(transitive_Zlt … Hnowrap_p1) @Zlt_translate try assumption ] >Z_addition_bv_commute [ 2,4: @(transitive_Zlt … Hnowrap_q1) @Zlt_translate try assumption ] try assumption ] -Hlt_stepA | 2,4: cut ((Z_of_unsigned_bitvector ? (offv oq1)) + (Z_of_unsigned_bitvector ? (offv DELTAq)) < (Z_of_unsigned_bitvector ? (offv op1)) + (Z_of_unsigned_bitvector ? (offv DELTAp))) [ 1,3: @(Zlt_to_Zle_to_Zlt ? (high_bound m1 bq1 + Zoo DELTAq) ?) [ 1,3: @Zlt_translate assumption | 2,4: @(transitive_Zle ? (low_bound m1 bp1+Zoo DELTAp) ?) try assumption @monotonic_Zle_Zplus_l try assumption ] ] #Hlt_stepA cut (Z_of_unsigned_bitvector ?
(addition_n offset_size (offv oq1) (offv DELTAq)) < Z_of_unsigned_bitvector ? (addition_n offset_size (offv op1) (offv DELTAp))) [ 1,3: >Z_addition_bv_commute [ 2,4: @(transitive_Zlt … Hnowrap_q1) @Zlt_translate try assumption ] >Z_addition_bv_commute [ 2,4: @(transitive_Zlt … Hnowrap_p1) @Zlt_translate try assumption ] try assumption ] -Hlt_stepA ] generalize in match (addition_n ? (offv op1) ?); #lhs generalize in match (addition_n ? (offv oq1) ?); #rhs #Hlt_stepB lapply (Zlt_to_Zneq … Hlt_stepB) @(eq_bv_elim … lhs rhs) [ 1,3,5,7: #Heq destruct * #H @(False_ind … (H (refl ??))) | 2,4,6,8: #Hneq #_ /3 by ex_intro, conj, vint_eq/ ] ] ] qed.