source: Deliverables/addenda/indexed_labels/report.tex @ 1673

Last change on this file since 1673 was 1673, checked in by tranquil, 7 years ago

report on indexed labels
TODO: corrections, examples, etc

File size: 63.6 KB
RevLine 
[1673]1
2\documentclass[11pt,epsf,a4wide]{article}
3\usepackage{../../style/cerco}
4\usepackage{pdfpages}
5
6\usepackage{graphics}
7
8% For SLNCS comment above and use
9% \documentclass{llncs}
10
11
12
13
14
15\RequirePackage[latin1]{inputenc}     
16 
17% Mettre les différents packages et fonctions que l'on utilise
18\usepackage[english]{babel} 
19\usepackage{amsmath} 
20\usepackage{amsfonts} 
21\usepackage{amssymb} 
22\usepackage{xspace} 
23\usepackage{latexsym} 
24\usepackage{url} 
25\usepackage{xspace} 
26%\usepackage{fancyvrb}
27\usepackage[all]{xy} 
28%packages pour LNCS
29%\usepackage{semantic}
30%\usepackage{cmll}
31% Packages for RR
32\usepackage{graphics,color}             
33%\RequirePackage[latin1]{inputenc} % duplicate: already loaded with the same option near the top of the preamble; a repeated load is a no-op, so this line is safely disabled
34\usepackage{array}
35 
36%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
37%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
38 
39\newenvironment{comment}{{\bf MORE WORK:}}{}% the empty end-code argument is mandatory: without it \newenvironment grabs the next token as the environment's end definition, corrupting the following definitions
40 
41\newenvironment{restate-proposition}[2][{}]{\noindent\textbf{Proposition~{#2}}
42\;\textbf{#1}\  
43}{\vskip 1em} 
44 
45\newenvironment{restate-theorem}[2][{}]{\noindent\textbf{Theorem~{#2}}\;\textbf{
46#1}\  
47}{\vskip 1em} 
48 
49\newenvironment{restate-corollary}[2][{}]{\noindent\textbf{Corollary~{#2}}
50\;\textbf{#1}\  
51}{\vskip 1em} 
52 
53\newcommand{\myparagraph}[1]{\medskip\noindent\textbf{#1}} 
54 
55\newcommand{\Proofitemb}[1]{\medskip \noindent {\bf #1\;}} 
56\newcommand{\Proofitemfb}[1]{\noindent {\bf #1\;}} 
57\newcommand{\Proofitem}[1]{\medskip \noindent $#1\;$} 
58\newcommand{\Proofitemf}[1]{\noindent $#1\;$} 
59\newcommand{\Defitem}[1]{\smallskip \noindent $#1\;$} 
60\newcommand{\Defitemt}[1]{\smallskip \noindent {\em #1\;}} 
61\newcommand{\Defitemf}[1]{\noindent $#1\;$} 
62 
63 
64%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
65%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
66 
67 
68 
69%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
70%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
71 
72\newcommand{\eqdef}{=_{\text{def}}} 
73\newcommand{\concat}{\cdot}%%{\mathbin{+}}
74\newcommand{\Int}{\mathit{int}} 
75\newcommand{\nat}{\mathit{nat}} 
76\newcommand{\String}{\mathit{string}} 
77\newcommand{\Ident}{\mathit{ident}} 
78\newcommand{\Block}{\mathit{block}} 
79\newcommand{\Signature}{\mathit{signature}} 
80 
81\newcommand{\pc}{\mathit{pc}} 
82\newcommand{\estack}{\mathit{estack}} 
83\newcommand{\Error}{\epsilon} 
84 
85%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
86%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
87 
88% --------------------------------------------------------------------- %
89% Proof rule.                                                           %
90% --------------------------------------------------------------------- %
91 
92\newcommand{\staterule}[3]{%
93  $\begin{array}{@{}l}%
94   \mbox{#1}\\%
95   \begin{array}{c}
96   #2\\ 
97   \hline 
98   \raisebox{0ex}[2.5ex]{\strut}#3%
99   \end{array}
100  \end{array}$} 
101 
102\newcommand{\GAP}{2ex} 
103 
104\newcommand{\recall}[2]{%
105 $\begin{array}{c}
106 #1\\ 
107 \hline 
108 \raisebox{0ex}[2.5ex]{\strut}#2%
109 \end{array}$} 
110 
111\newcommand{\hbra}{\noindent\hbox to \textwidth{\leaders\hrule height1.8mm
112depth-1.5mm\hfill}} 
113\newcommand{\hket}{\noindent\hbox to \textwidth{\leaders\hrule
114height0.3mm\hfill}} 
115\newcommand{\ratio}{.3} 
116 
117\newenvironment{display}[1]{\begin{tabbing} 
118  \hspace{1.5em} \= \hspace{\ratio\linewidth-1.5em} \= \hspace{1.5em} \= \kill 
119  \noindent\hbra\\[-.5em] 
120  {\ }\textsc{#1}\\[-.8ex] 
121  \hbox to \textwidth{\leaders\hrule height1.6mm depth-1.5mm\hfill}\\[-.8ex] 
122  }{\\[-.8ex]\hket 
123  \end{tabbing}} 
124 
125 
126\newcommand{\sbline}{\hfill\smash[t]{\rule[1.5em]{\textwidth}{0.2ex}
127\hfill\hspace*{0ex}}} 
128\newcommand{\sline}{\hfill\smash[t]{\rule[1.5em]{\textwidth}{0.1ex}
129\hfill\hspace*{0ex}}} 
130\newcommand{\sentry}[2]{\>$#1$\>\ \smash[t]{\vrule width 0.2mm height
131    1.2\baselineskip depth 1.5\baselineskip}\>#2} 
132 
133\newcommand{\entry}[2]{\>$#1$\>\>#2} 
134\newcommand{\clause}[2]{$#1$\>\>#2} 
135\newcommand{\category}[2]{\clause{#1::=}{#2}} 
136\newcommand{\subclause}[1]{\>\>\>#1} 
137\newcommand{\redrule}[3]{$#1$\>\>$#2$\>\>\>#3} 
138 
139%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
140%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
141 
142% environments
143
144% To be commented for LNCS
145 \newtheorem{theorem}{Theorem} 
146 \newtheorem{fact}[theorem]{Fact} 
147 \newtheorem{definition}[theorem]{Definition} 
148 \newtheorem{lemma}[theorem]{Lemma} 
149 \newtheorem{corollary}[theorem]{Corollary} 
150 \newtheorem{proposition}[theorem]{Proposition} 
151 \newtheorem{example}[theorem]{Example} 
152 \newtheorem{exercise}[theorem]{Exercise} 
153 \newtheorem{remark}[theorem]{Remark} 
154 \newtheorem{question}[theorem]{Question}
155 \newtheorem{proviso}[theorem]{Proviso} 
156  \newtheorem{conjecture}[theorem]{Conjecture}
157
158% proofs 
159 
160\newcommand{\Proof}{\noindent {\sc Proof}. } 
161\newcommand{\Proofhint}{\noindent {\sc Proof hint}. } 
162% To be commented for LNCS
163 \newcommand{\qed}{\hfill${\Box}$} 
164\newcommand{\EndProof}{\qed}
165 
166% figure environment
167 
168\newcommand{\Figbar}{{\center \rule{\hsize}{0.3mm}}}
169 %horizontal thiner line for figures
170\newenvironment{figureplr}{\begin{figure}[t] \Figbar}{\Figbar \end{figure}} 
171%environment for figures
172%       ************Macros for mathematical symbols*************
173% Style
174 
175\newcommand{\cl}[1]{{\cal #1}}          % \cl{R} to make R calligraphic
176\newcommand{\la}{\langle}               % the brackets for pairing (see also \pair)
177\newcommand{\ra}{\rangle} 
178 
179\newcommand{\lf}{\lfloor} 
180\newcommand{\rf}{\rfloor} 
181\newcommand{\ul}[1]{\underline{#1}}     % to underline
182\newcommand{\ol}[1]{\overline{#1}}      % to overline
183\newcommand{\ok}{~ok}                   % well formed context
184 
185% Syntax
186 
187\newcommand{\Gives}{\vdash}             % in a type judgment
188\newcommand{\IGives}{\vdash_{I}}        % intuitionistic provability
189\newcommand{\AIGives}{\vdash_{{\it AI}}} %affine-intuitionistic provability
190\newcommand{\CGives}{\vdash_{C}}        % affine-intuitionistic confluent provability
191
192
193\newcommand{\Models}{\mid \! =}              % models
194
195\newcommand{\emptycxt}{\On}              % empty context
196\newcommand{\subs}[2]{[#1 / #2]} 
197\newcommand{\sub}[2]{[#2 / #1]}         % substitution \sub{x}{U} gives [U/x]
198 
199\newcommand{\Sub}[3]{[#3 / #2]#1}       % Substitution with three arguments \Sub{V}{x}{U}
200
201\newcommand{\lsub}[2]{#2 / #1}          % substitution \lsub{x}{U} gives U/x, to  be used in a list.
202 
203\newcommand{\impl}{\supset} 
204\newcommand{\arrow}{\rightarrow}        % right thin arrow
205\newcommand{\trarrow}{\stackrel{*}{\rightarrow}}        % trans closure
206%\newcommand{\limp}{\makebox[5mm]{\,$- \! {\circ}\,$}}   % linear
207                                % implication
208\newcommand{\limp}{\multimap} %linear implication
209\newcommand{\bang}{\, !} 
210% LNCS
211%\newcommand{\bang}{\oc}
212\newcommand{\limpe}[1]{\stackrel{#1}{\multimap}}
213\newcommand{\hyp}[3]{#1:(#2, #3)}
214\newcommand{\letm}[3]{{\sf let} \ ! #1 = #2 \ {\sf in} \ #3}    % modal let
215\newcommand{\lets}[3]{{\sf let} \ #1 = #2 \ {\sf in} \ #3}    % simple let
216\newcommand{\letp}[3]{{\sf let} \ \S #1 = #2 \ {\sf in} \ #3}    % paragraph let
217\newcommand{\tertype}{{\bf 1}}
218\newcommand{\behtype}{{\bf B}}
219\newcommand{\bt}[1]{{\it BT}(#1)}       % Boehm tree
220\newcommand{\cxt}[1]{#1[~]}             % Context with one hole
221\newcommand{\pr}{\parallel}             % parallel ||
222\newcommand{\Nat}{\mathbf{N}}                 % natural numbers
223\newcommand{\Natmax}{\mathbf{N}_{{\it max}}}  % natural numbers with minus infinity
224\newcommand{\Rat}{\mathbf{Q}^{+}}                 % non-negative rationals
225\newcommand{\Ratmax}{\mathbf{Q}^{+}_{{\it max}}}  % non-negative rationals with minus infinity
226\newcommand{\Alt}{ \mid\!\!\mid  } 
227\newcommand{\isum}{\oplus} 
228\newcommand{\csum}{\uplus}              %context sum
229\newcommand{\dpar}{\mid\!\mid} 
230                                        % for the production of a grammar containing \mid
231\newcommand{\infer}[2]{\begin{array}{c} #1 \\ \hline #2 \end{array}} 
232                                        % to make a centered inference rule
233 
234% (Meta-)Logic
235 
236\newcommand{\bool}{{\sf bool}}          % boolean values
237\newcommand{\Or}{\vee}                  % disjunction
238\newcommand{\OR}{\bigvee}               % big disjunction
239\newcommand{\AND}{\wedge}               % conjunction
240\newcommand{\ANDD}{\bigwedge}           % big conjunction
241\newcommand{\Arrow}{\Rightarrow}        % right double arrow
242\newcommand{\IFF}{\mbox{~~iff~~}}       % iff in roman and with spaces
243\newcommand{\iffArrow}{\Leftrightarrow} % logical equivalence
244 
245% Semantics
246 
247\newcommand{\dl}{[\![}                  % semantic [[
248\newcommand{\dr}{]\!]}                  % semantic ]]
249\newcommand{\lam}{{\bf \lambda}}        % semantic lambda
250
251
252% The equivalences for this paper
253
254% the usual ones
255\newcommand{\ubis}{\approx^u}          % usual labelled weak bis
256\newcommand{\uabis}{\approx^{u}_{ccs}} % usual labelled weak bis on CCS
257
258% the contextual conv sensitive
259\newcommand{\cbis}{\approx}        % convergence sensitive bis
260\newcommand{\cabis}{\approx_{ccs}}  % convergence sensitive bis on CCS
261
262% the labelled conv sensitive
263\newcommand{\lcbis}{\approx^{\ell}} %
264\newcommand{\lcabis}{\approx^{\ell}_{ccs}} % labelled convergence sensitive bis on CCS
265\newcommand{\lcbiswrong}{\approx^{\ell \Downarrow}} %
266
267
268
269
270
271
272
273\newcommand{\maytest}{=_{\Downarrow}}
274\newcommand{\musttest}{=_{\Downarrow_{S}}}
275
276
277 
278 
279% Sets
280 
281\newcommand{\prt}[1]{{\cal P}(#1)}      % Parts of a set
282\newcommand{\finprt}[1]{{\cal P}_{fin}(#1)}% Finite parts
283\newcommand{\finprtp}[1]{{\cal P}_{fin}^{+}(#1)}% Non-empty Finite parts
284\newcommand{\union}{\cup}               % union
285\newcommand{\inter}{\cap}               % intersection
286\newcommand{\Union}{\bigcup}            % big union
287\newcommand{\Inter}{\bigcap}            % big intersection
288\newcommand{\cpl}[1]{#1^{c}}            % complement
289\newcommand{\card}{\sharp}              % cardinality
290\newcommand{\minus}{\backslash}         % set difference
291\newcommand{\sequence}[2]{\{#1\}_{#2}}  % ex. \sequence{d_n}{n\in \omega}
292\newcommand{\comp}{\circ}               % functional composition
293%\newcommand{\oset}[1]{\{#1\}}            % set enumeration
294\newcommand{\mset}[1]{\{\! | #1 |\!\}}  % pseudo-set notation {| |}
295 
296% Domains
297 
298\newcommand{\two}{{\bf O}}              % Sierpinski space
299\newcommand{\join}{\vee}                % join
300\newcommand{\JOIN}{\bigvee}             % big join 
301\newcommand{\meet}{\wedge}              % meet
302\newcommand{\MEET}{\bigwedge}           % big meet
303\newcommand{\dcl}{\downarrow}           % down closure
304\newcommand{\ucl}{\uparrow}             % up closure
305\newcommand{\conv}{\downarrow}          % synt. conv. pred. (postfix)
306\newcommand{\diver}{\uparrow}           % diverging term
307\newcommand{\Conv}{\Downarrow}          % sem. conv. pred. (postfix)
308\newcommand{\SConv}{\Downarrow_{S}}          % sem. conv. pred. (postfix)
309\newcommand{\CConv}{\Downarrow_{C}}
310\newcommand{\Diver}{\Uparrow}           % diverging map
311\newcommand{\cpt}[1]{{\cal K}(#1)}      % compacts, write \cpt{D}
312\newcommand{\ret}{\triangleleft}        % retract
313\newcommand{\nor}{\succeq}
314\newcommand{\prj}{\underline{\ret}}     % projection
315\newcommand{\parrow}{\rightharpoonup}   % partial function space
316\newcommand{\ub}[1]{{\it UB}(#1)}       % upper bounds
317\newcommand{\mub}[1]{{\it MUB}(#1)}     % minimal upper bounds
318\newcommand{\lift}[1]{(#1)_{\bot}}      % lifting
319\newcommand{\forget}[1]{\underline{#1}} % forgetful translation
320
321%\newcommand{\rel}[1]{\;{\cal #1}\;}     % infix relation (calligraphic)
322\newcommand{\rl}[1]{\;{\cal #1}\;}             % infix relation
323\newcommand{\rel}[1]{{\cal #1}}         %calligraphic relation with no
324                                        %extra space
325\newcommand{\per}[1]{\;#1 \;} 
326\newcommand{\wddagger}{\natural}  % weak suspension
327%\newcommand{\wddagger}{=\!\!\!\!\parallel}  % weak suspension
328% Categories
329 
330\newcommand{\pair}[2]{\langle #1 , #2 \rangle} % pairing \pair{x}{y}, do not use < >.
331 
332%               *******  Notation for the $\pi$-calculus *********
333% Syntax:
334 
335 
336\newcommand{\fn}[1]{{\it fn}(#1)}                       % free names
337\newcommand{\bn}[1]{{\it bn}(#1)}                       % bound names
338\newcommand{\names}[1]{{\it n}(#1)}                     % names
339\newcommand{\true}{{\sf t}}                             % true
340\newcommand{\false}{{\sf f}}                            % false
341\newcommand{\pio}{\pi_1}                                % 1 receptor calculus
342\newcommand{\pioo}{\pi_{1}^{r}} 
343\newcommand{\piom}{\pi_{1}^{-}}                         % 1 receptor calculus wo match
344\newcommand{\pioi}{\pi_{1I}}                    % 1 receptor I-calculus
345\newcommand{\pifo}{\pi_{\w{1f}}}                                % functional calculus
346\newcommand{\pilo}{\pi_{\w{1l}}}                                % located calculus
347\newcommand{\sort}[1]{{\it st}(#1)}                     % sort
348\newcommand{\ia}[1]{{\it ia}(#1)}                     % sort
349\newcommand{\ite}[3]{{\sf if~} #1 {\sf ~then~} #2 {\sf ~else~} #3}      %if then else
350\newcommand{\casep}[2]{{\sf case}^{\times}(#1, \pair{x}{y}\Arrow#2)}      %case on pairs
351\newcommand{\casel}[3]{{\sf case}^{L}(#1, #2, \s{cons}(x,y)\Arrow#3)}      %case on lists
352\newcommand{\caseb}[3]{{\sf case}^{b}(#1, #2, \s{cons}(x,y)\Arrow#3)}      %case on lists
353\newcommand{\nil}{{\sf nil}} 
354\newcommand{\cons}{{\sf cons}} 
355\newcommand{\idle}[1]{{\it Idle}(#1)}                   %idle process
356\newcommand{\conf}[1]{\{ #1 \}}                         %configuration
357\newcommand{\link}[2]{#1 \mapsto #2}                    %likn a ->b
358\newcommand{\mand}{\mbox{ and }} 
359\newcommand{\dvec}[1]{\tilde{{\bf #1}}}                 %double vector
360\newcommand{\erloc}[1]{{\it er}_{l}(#1)}                % location erasure
361\newcommand{\w}[1]{{\it #1}}    %To write in math style
362\newcommand{\vcb}[1]{{\bf #1}} 
363\newcommand{\lc}{\langle\!|} 
364\newcommand{\rc}{|\!\rangle} 
365\newcommand{\obj}[1]{{\it obj}(#1)} 
366\newcommand{\move}[1]{{\sf move}(#1)} 
367\newcommand{\qqs}[2]{\forall\, #1\;\: #2} 
368\newcommand{\qtype}[4]{\forall #1 :  #2 . (#4,#3)} 
369\newcommand{\xst}[2]{\exists\, #1\;\: #2} 
370\newcommand{\xstu}[2]{\exists\, ! #1\;\: #2} 
371\newcommand{\dpt}{\,:\,} 
372\newcommand{\cond}[3]{\mathsf{if}\ #1\ \mathsf{then}\ #2\ \mathsf{else}\ #3} 
373\newcommand{\s}[1]{{\sf #1}}    % sans-serif 
374\newcommand{\vc}[1]{{\bf #1}} 
375\newcommand{\lnorm}{\lbrack\!\lbrack} 
376\newcommand{\rnorm}{\rbrack\!\rbrack} 
377\newcommand{\sem}[1]{\underline{#1}} 
378\newcommand{\tra}[1]{\langle #1 \rangle}
379\newcommand{\trb}[1]{[ #1 ]}
380\newcommand{\squn}{\mathop{\scriptstyle\sqcup}} 
381\newcommand{\lcro}{\langle\!|} 
382\newcommand{\rcro}{|\!\rangle} 
383\newcommand{\semi}[1]{\lcro #1\rcro} 
384\newcommand{\sell}{\,\ell\,} 
385\newcommand{\SDZ}[1]{\marginpar{\textbf{SDZ:} {#1}}} 
386 
387\newcommand{\when}[3]{{\sf when}~#1~{\sf then}~#2~{\sf else}~#3} 
388\newcommand{\wthen}[2]{{\sf when}~#1~{\sf then}~#2~} 
389\newcommand{\welse}[1]{{\sf else}~#1} 
390
391%Pour la fleche double, il faut rajouter :
392%      \usepackage{mathtools}
393
394\newcommand{\act}[1]{\xrightarrow{#1}} %labelled actionlow %high
395
396\newcommand{\lact}[1]{\stackrel{#1}{\makebox[5mm]{\,$- \! {\circ}\,$}}}
397
398\newcommand{\set}[1]{\{#1\}}
399\newcommand{\pst}[2]{{\sf pset}(#1,#2)}
400\newcommand{\st}[2]{{\sf set}(#1,#2)}
401\newcommand{\wrt}[2]{{\sf w}(#1,#2)}
402
403\newcommand{\chtype}[2]{{\it Ch_{#1}(#2)}}
404\newcommand{\rgtype}[2]{{\it {\sf Reg}_{#1} #2}}
405
406\newcommand{\get}[1]{{\sf get}(#1)}
407
408%\newcommand{\wact}[1]{\xRightarrow{#1}} %weak labelled action low high
409
410%\newcommand{\mact}[1]{\xrightarrow{#1}_{m}} %labelled action low %high
411
412%\newcommand{\wmact}[1]{\xRightarrow{#1}_{m}} %weak labelled action low high
413
414%\newcommand{\act}[1]{\stackrel{#1}{\rightarrow}} %labelled action low
415                                %%%high
416
417\newcommand{\acteq}[1]{\stackrel{#1}{\leadsto}} %labelled action low
418                                %%%high
419
420
421%\newcommand{\actI}[1]{\stackrel{#1}{\rightarrow_{1}}} %labelled action low
422\newcommand{\actI}[1]{\xrightarrow{#1}_{1}}
423
424%\newcommand{\actII}[1]{\stackrel{#1}{\rightarrow_{2}}} %labelled action low
425\newcommand{\actII}[1]{\xrightarrow{#1}_{2}}
426
427
428 \newcommand{\wact}[1]{\stackrel{#1}{\Rightarrow}} %weak labelled action low high
429\newcommand{\wactI}[1]{\stackrel{#1}{\Rightarrow_{1}}} %weak labelled action low high
430\newcommand{\wactII}[1]{\stackrel{#1}{\Rightarrow_{2}}} %weak labelled action low high
431
432
433\newcommand{\mact}[1]{\stackrel{#1}{\rightarrow_{m}}} %labelled action low
434%high
435\newcommand{\wmact}[1]{\stackrel{#1}{\Rightarrow_{m}}} %weak labelled action low high
436 
437%\newcommand{\lact}[1]{\stackrel{#1}{\leftarrow}}
438\newcommand{\lwact}[1]{\stackrel{#1}{\Leftarrow}} 
439 
440 
441 
442\newcommand{\eval}{\Downarrow} 
443\newcommand{\Eval}[1]{\Downarrow^{#1}} 
444 
445 
446\newcommand{\Z}{{\bf Z}} 
447\newcommand{\Real}{\mathbb{R}^{+}} 
448\newcommand{\Return}{\ensuremath{\mathtt{return}}\xspace}                 
449\newcommand{\Stop}{\ensuremath{\mathtt{stop}}\xspace} 
450\newcommand{\Wait}{\ensuremath{\mathtt{wait}}\xspace} 
451\newcommand{\Read}{\ensuremath{\mathtt{read}}\xspace} 
452\newcommand{\Write}{\ensuremath{\mathtt{write}}\xspace} 
453\newcommand{\Yield}{\ensuremath{\mathtt{yield}}\xspace} 
454\newcommand{\Next}{\ensuremath{\mathtt{next}}\xspace} 
455\newcommand{\Load}{\ensuremath{\mathtt{load}}\xspace} 
456\newcommand{\Call}{\ensuremath{\mathtt{call}}\xspace} 
457\newcommand{\Tcall}{\ensuremath{\mathtt{tcall}}\xspace} 
458\newcommand{\Pop}{\ensuremath{\mathtt{pop}}\xspace} 
459\newcommand{\Build}{\ensuremath{\mathtt{build}}\xspace} 
460\newcommand{\Branch}{\ensuremath{\mathtt{branch}}\xspace} 
461\newcommand{\Goto}{\ensuremath{\mathtt{goto}}\xspace} 
462 
463\newcommand{\hatt}[1]{#1^{+}} 
464\newcommand{\Of}{\mathbin{\w{of}}} 
465 
466\newcommand{\susp}{\downarrow} 
467\newcommand{\lsusp}{\Downarrow_L} 
468\newcommand{\wsusp}{\Downarrow} 
469\newcommand{\commits}{\searrow} 
470 
471 
472\newcommand{\spi}{S\pi} 
473 
474
475 \newcommand{\pres}[2]{#1\triangleright #2} %TCCS else next (alternative)
476% \newcommand{\pres}[2]{ \lfloor #1 \rfloor (#2)}  %TCCS else next
477\newcommand{\present}[3]{{\sf present} \ #1 \ {\sf do } \ #2 \ {\sf  else} \ #3}
478
479
480\newcommand{\tick}{{\sf tick}}          %tick action
481
482 
483 
484\newcommand{\sbis}{\equiv_L} 
485\newcommand{\emit}[2]{\ol{#1}#2} 
486%\newcommand{\present}[4]{#1(#2).#3,#4}
487\newcommand{\match}[4]{[#1=#2]#3,#4}       %pi-equality
488
489\newcommand{\matchv}[4]{[#1 \unrhd #2]#3,#4}
490
491\newcommand{\new}[2]{\nu #1 \ #2} 
492\newcommand{\outact}[3]{\new{{\bf #1}}{\emit{#2}{#3}}} 
493\newcommand{\real}{\makebox[5mm]{\,$\|\!-$}}% realizability relation
494
495\newcommand{\regterm}[2]{{\sf reg}_{#1} #2}
496\newcommand{\thread}[1]{{\sf thread} \ #1}
497\newcommand{\store}[2]{(#1 \leftarrow #2)}
498\newcommand{\pstore}[2]{(#1 \Leftarrow #2)}
499
500\newcommand{\regtype}[2]{{\sf Reg}_{#1} #2}
501\newcommand{\uregtype}[3]{{\sf Reg}_{#1}(#2, #3)}
502\newcommand{\urtype}[2]{{\sf Reg}(#1, #2)}
503
504\newcommand{\upair}[2]{[#1,#2]}
505\newcommand{\letb}[3]{\mathsf{let}\;\oc #1 = #2\;\mathsf{in}\;#3}
506
507\newcommand{\vlt}[1]{{\cal V}(#1)}
508\newcommand{\prs}[1]{{\cal P}(#1)}
509
510\newcommand{\imp}{{\sf Imp}}            %imp language
511\newcommand{\vm}{{\sf Vm}}              %virtual machine language
512\newcommand{\mips}{{\sf Mips}}          %Mips language
513\newcommand{\C}{{\sf C}}                % C language
514\newcommand{\Clight}{{\sf Clight}}        %C light language
515\newcommand{\Cminor}{{\sf Cminor}}
516\newcommand{\RTLAbs}{{\sf RTLAbs}}
517\newcommand{\RTL}{{\sf RTL}}
518\newcommand{\ERTL}{{\sf ERTL}}
519\newcommand{\LTL}{{\sf LTL}}
520\newcommand{\LIN}{{\sf LIN}}
521\newcommand{\access}[1]{\stackrel{#1}{\leadsto}}
522\newcommand{\ocaml}{{\sf ocaml}}
523\newcommand{\coq}{{\sf Coq}}
524\newcommand{\compcert}{{\sf CompCert}}
525%\newcommand{\cerco}{{\sf CerCo}}
526\newcommand{\cil}{{\sf CIL}}
527\newcommand{\scade}{{\sf Scade}}
528\newcommand{\absint}{{\sf AbsInt}}
529\newcommand{\framac}{{\sf Frama-C}}
530\newcommand{\powerpc}{{\sf PowerPc}}
531\newcommand{\lustre}{{\sf Lustre}}
532\newcommand{\esterel}{{\sf Esterel}}
533\newcommand{\ml}{{\sf ML}}
534
535\newcommand{\codeex}[1]{\texttt{#1}}   % code example
536
537\bibliographystyle{abbrv} 
538
539\title{
540INFORMATION AND COMMUNICATION TECHNOLOGIES\\
541(ICT)\\
542PROGRAMME\\
543\vspace*{1cm}Project FP7-ICT-2009-C-243881 \cerco{}}
544
545\date{ }
546\author{}
547%>>>>>> new commands used in this document
548% \usepackage[nohyperref,nosvn]{mystyle}
549
550\usepackage{multirow}
551\newcolumntype{b}{@{}>{{}}}
552\newcolumntype{B}{@{}>{{}}c<{{}}@{}}
553\newcolumntype{h}[1]{@{\hspace{#1}}}
554\newcolumntype{L}{>{$}l<{$}}
555\newcolumntype{C}{>{$}c<{$}}
556\newcolumntype{R}{>{$}r<{$}}
557\newcolumntype{S}{>{$(}r<{)$}}
558\newcolumntype{n}{@{}}
559\newcommand{\spanr}[2]{\multicolumn{1}{Rn}{\multirow{#1}{*}{(#2)}}}
560\newcommand*{\nocol}{\multicolumn{1}{ncn}{}} % \newcommand* errors on accidental redefinition, unlike the raw TeX \def
561
562\usepackage[disable, colorinlistoftodos]{todonotes}
563\usepackage{enumerate}
564\usepackage{tikz}
565
566\newcommand{\tern}[3]{#1\mathrel ? #2 : #3}
567\newcommand{\sop}[1]{\s{#1}\ }
568\newcommand{\sbin}[1]{\ \s{#1}\ }
569\newcommand{\Ell}{\mathcal L}
570\newcommand{\alphab}{\boldsymbol\alpha}
571\newcommand{\betab}{\boldsymbol\beta}
572\newcommand{\gramm}{\mathrel{::=}}
573\newcommand{\ass}{\mathrel{:=}}
574%<<<<<<<<<<<<
575\begin{document}
576% \thispagestyle{empty}
577%
578% \vspace*{-1cm}
579% \begin{center}
580% \includegraphics[width=0.6\textwidth]{../style/cerco_logo.png}
581% \end{center}
582%
583% \begin{minipage}{\textwidth}
584% \maketitle
585% \end{minipage}
586%
587%
588% \vspace*{0.5cm}
589% \begin{center}
590% \begin{LARGE}
591% \bf
592% Report \\
593% Dependent Cost Labels
594% \\
595% \end{LARGE}
596% \end{center}
597%
598% \vspace*{2cm}
599% \begin{center}
600% \begin{large}
601% Version 0.1
602% \end{large}
603% \end{center}
604%
605% \vspace*{0.5cm}
606% \begin{center}
607% \begin{large}
608% Main Author:\\
609% Paolo Tranquilli
610% \end{large}
611% \end{center}
612%
613% \vspace*{\fill}
614% \noindent
615% Project Acronym: \cerco{}\\
616% Project full title: Certified Complexity\\
617% Proposal/Contract no.: FP7-ICT-2009-C-243881 \cerco{}\\
618%
619% \clearpage \pagestyle{myheadings} \markright{\cerco{}, FP7-ICT-2009-C-243881}
620%
621% \newpage
622
623\listoftodos
624
625\section{Introduction}
626In~\cite{D2.1} an approach is presented tackling the problem of building a
627compiler from a large fragment of C which is capable of lifting execution cost information
628from the compiled code and present it to the user. This endeavour is central to
629the CerCo project, which strives to produce a mechanically certified version of
630such a compiler.
631
632In rough words, the proposed approach consists in decorating the source code with
633labels at key points, and preserving such labels along the compilation chain.
634Once the final object code is produced, such labels should correspond to parts
635of the compiled code which have constant cost.
636
637There are two properties one asks of the cost estimate. The first, paramount to
638the correctness of the method, is \emph{soundness}: the actual execution cost
639is bounded by the estimate. In the labelling approach, this is guaranteed if every
640loop in the control flow of the compiled code passes through at least one cost
641label. The second, optional but desirable, is \emph{preciseness}: the estimate
642is not in fact an estimate but the actual cost. In the labelling approach, this will be
643true if for every label every possible execution of the compiled code starting
644from such a label yields the same cost. In simple architectures such as the 8051
645micro-controller this can be guaranteed by having labels at the immediate start of any
646branch of the control flow, and by ensuring no duplicate labels are present.
647
648It should be underlined that the above mentioned requirements must hold when
649executing the code at the
650end of the compilation chain. If one is careful to inject the labels at good
651key places in the source code, one can still think of two main obstacles:
652\begin{itemize}
653 \item compilation introduces important changes in the control flow, inserting
654loops or branches: an example of addressing this is the insertion of functions
655in the source code replacing instructions unavailable in the target architecture
656that require loops (e.g.\ multi-word division and generic shift in the 8051
657architecture) so that the annotation process be sound, or the effort put in
658providing unbranching translations of higher level instructions~
659\cite{D2.2};
660 \item even if the compiled code \emph{does}, as far as the syntactic
661  control flow graph is concerned, respect the conditions for soundness and
662  preciseness, the cost of blocks of instructions might not be independent of context,
663  so that different passes through a label might have different costs: this
664  becomes a concern if one wishes to apply the approach to more complex architectures,
665  for example one with cache or pipelining.
666\end{itemize}
667
668The first point unveils a weakness of the current labelling approach when it
669comes to some common code transformations done along a compilation chain. In
670particular most of \emph{loop optimizations} disrupt such conditions directly.
671For example in what we will call \emph{loop peeling} a first iteration of the
672loop is hoisted ahead of it, possibly to have a different cost than later iterations
673because of further transformations such as dead code elimination or invariant
674code motion.
675
676The second point strikes a difference in favour of existing tools for the
677estimate of execution costs to the detriment of CerCo's approach advocated in~
678\cite{D2.1}. We will take as example the well known tool \s{aiT}~\cite{absint},
679based on abstract interpretation: such a tool allows one to estimate the
680worst-case execution time (WCET) taking into account advanced aspects of the
681target architecture. \s{aiT}'s ability to
682do close estimates of execution costs even when these depend on the context of
683execution would enhance the effectiveness of CerCo's compiler, either by
684integrating such techniques in its development, or by directly calling this tool
685when ascertaining the cost of blocks of compiled code. A typical case where
686cache analysis yields a difference in the execution cost of a block is in loops:
687the first iteration will usually stumble upon more cache misses than subsequent
688iterations.
689
690If one looks closely, the source of the weakness of the labelling approach as
691presented in~\cite{D2.1} is common to both points: the inability to state
692different costs for different occurrences of labels, where the difference
693might be originated by labels being duplicated along compilation
694or the costs being sensitive to the state of execution.
695
696The preliminary work
697we present here addresses this issue, introducing cost labels that are dependent
698on which iteration of their containing loops they occur in. This is achieved by
699means of \emph{indexed labels}: all cost labels are decorated with formal
700indexes coming from the loops containing such labels. These indexes allow us to
701rebuild, even after several steps of loop transformations,
702which iterations of the original loops in the source code a particular label
703occurrence belongs to. During annotation of source code, this information
704is given to the user by means of dependent costs.
705
706We concentrate on integrating the labelling approach with two loop transformations.
707For general information on compiler optimization (and loop optimizations in
708particular) we refer the reader to the vast literature on the subject
709(e.g.~\cite{muchnick,morgan}).
710
711\emph{Loop peeling}, as already mentioned, consists in preceding the loop with
712a copy of its body, appropriately guarded. This is in general useful to trigger
713further optimizations, such as ones relying on execution information which can
714be computed at compile time but which is erased by further iterations of the loop,
715or ones that use the hoisted code to be more effective at eliminating redundant
716code. Integrating this transformation into the labelling approach would also allow
717us to integrate the common case of cache analysis explained above: the analysis
718of cache hits and misses in the case of loops usually benefits from a form of
719\emph{virtual} loop peeling~\cite{cacheprediction}.
720
721\emph{Loop unrolling} consists in repeating several times the body of the loop
722inside the loop itself (inserting appropriate guards, or avoiding them altogether
723if enough information on the loop's guard is available at compile time). This
724can limit the number of (conditional or unconditional) jumps executed by the
725code and trigger further optimizations dealing with pipelining, if applicable
726to the architecture.
727
728While covering only two loop optimizations, the work presented here lays good
729foundations for extending the labelling approach to cover more and more of them, as well
730as giving hints as to how to integrate into CerCo's compiler advanced cost estimation
731techniques such as cache analysis. Moreover loop peeling has the substantial
732advantage of enhancing other optimizations. Experimentation with CerCo's
733untrusted prototype compiler with constant propagation and
734partial redundancy elimination~\cite{PRE,muchnick} shows how loop peeling enhances
735those other optimizations.
736
737\paragraph{Outline.}
738We will present our approach on a minimal toy imperative language, \imp{}
739with gotos, which we present in \autoref{sec:defimp} along with a formal
740definition of the loop transformations. This language already
741presents most of the difficulties encountered when dealing with C, so
742we stick to it for the sake of this presentation. In \autoref{sec:labelling}
743we summarize the labelling approach as presented in~\cite{D2.1}. The following
744\autoref{sec:indexedlabels} explains \emph{indexed labels}, our proposal for
745dependent labels which are able to describe precise costs even in the presence
746of the loop transformations we consider. Finally \autoref{sec:conc} goes into
747more details regarding the implementation of indexed labels in CerCo's
748untrusted compiler and speculates on further work on the subject.
749
750
751\section{\imp{} with goto}\label{sec:defimp}
752We briefly outline the toy language which contains all the relevant instructions
753to present the development and the problems it is called to solve.
754
755The version of the minimal imperative language \imp that we will present has,
756with respect to the barebone usual version, \s{goto}s and labelled statements.
757Break and continue statements can be added at no particular expense. Its syntax
758and operational semantics is presented in \autoref{fig:minimp}. The actual
grammar for expressions is not particularly relevant so we do not give a
precise one. For the sake of this presentation we also treat boolean and
arithmetic expressions together (with the usual convention of an expression
762being true iff non-zero). We will consistently suppose programs are
\emph{well-labelled}, i.e.\ every label labels at most one occurrence of a statement
764in the program, and every \s{goto} points to a label actually present in the program.
765\begin{figure}
766$$\begin{gathered}
767\begin{array}{nlBl>(R<)n}
768\multicolumn{4}{C}{\bfseries Syntax}\\
769\multicolumn{4}{ncn}{
770  \ell,\ldots \hfill \text{(labels)} \hfill x,y,\ldots \hfill
771\text{(identifiers)}
772\hfill e,f,\ldots \hfill \text{(expression)}
773}\\
774P,S,T,\ldots &\gramm& \s{skip} \mid s;t
775\mid \sop{if}e\sbin{then}s\sbin{else}t
776\mid \sop{while} e \sbin{do} s \mid
777  x \ass e
778\\&\mid&
779\ell : s \mid \sop{goto}\ell& \spanr{-2}{statements}\\
780\\
781\multicolumn{4}{C}{\bfseries Semantics}\\
782K,\ldots  &\gramm& \s{halt} \mid S \cdot K & continuations
783\end{array}
784\\[15pt]
785\s{find}(\ell,S,K) \ass
786\left\{\begin{array}{lL}
787\bot & if $S=\s{skip},\sop{goto} \ell'$ or $x\ass e$,\\
788(T, K) & if $S=\ell:T$,\\
789\s{find}(\ell,T,K) & otherwise, if $S = \ell':T$,\\
790\s{find}(\ell,T_1,T_2\cdot K) & if defined and $S=T_1;T_2$,\\
791\s{find}(\ell,T_1,K) & if defined and
792$S=\sop{if}b\sbin{then}T_1\sbin{else}T_2$,\\
793\s{find}(\ell,T_2,K) & otherwise, if $S=T_1;T_2$ or
794$\sop{if}b\sbin{then}T_1\sbin{else}T_2$,\\
795\s{find}(\ell,T,S\cdot K) & if $S = \sop{while}b\sbin{do}T$.
796\end{array}\right.
797\\[15pt]
798\begin{array}{lBl}
799(x:=e,K,s)  &\to_P& (\s{skip},K,s[v/x]) \qquad\mbox{if }(e,s)\eval v \\ \\
800
801(S;T,K,s)  &\to_P& (S,T\cdot K,s) \\ \\
802
803(\s{if} \ b \ \s{then} \ S \ \s{else} \ T,K,s)
804&\to_P&\left\{
805\begin{array}{ll}
806(S,K,s) &\mbox{if }(b,s)\eval v \neq 0 \\
807(T,K,s) &\mbox{if }(b,s)\eval 0
808\end{array}
809\right. \\ \\
810
811
812(\s{while} \ b \ \s{do} \ S ,K,s)
813&\to_P&\left\{
814\begin{array}{ll}
815(S,\s{while} \ b \ \s{do} \ S \cdot K,s) &\mbox{if }(b,s)\eval v \neq 0 \\
816(\s{skip},K,s) &\mbox{if }(b,s)\eval 0
817\end{array}
818\right. \\ \\
819
820
821(\s{skip},S\cdot K,s)  &\to_P&(S,K,s) \\ \\
822
823(\ell : S, K, s)  &\to_P& (S,K,s) \\ \\
824
825(\sop{goto}\ell,K,s)  &\to_P& (\s{find}(\ell,P,\s{halt}),s) \\ \\
826\end{array}
827\end{gathered}$$
828
829
830\caption{The syntax and operational semantics of \imp.}
831\label{fig:minimp}
832\end{figure}
833
834
835
836
837\paragraph{Further down the compilation chain.}
838As for the rest of the compilation chain, we abstract over it. We just posit
839every language $L$ further down the compilation chain has a suitable notion of
840sequential instructions (with one natural successor), to which we can add our
841own.
842
843\subsection{Loop transformations}
844We call a loop $L$ \emph{single-entry} in $P$ if there is no \s{goto} of $P$
845outside of $L$ which jumps to within $L$\footnote{This is a reasonable
approximation: it also considers as multi-entry those loops having an external but
847unreachable \s{goto} jumping to them.}. Many loop optimizations do not preserve
848the semantics of multi-entry loops in general, or are otherwise rendered
849ineffective. Usually compilers implement a multi-entry loop detection which
850avoids those loops from being targeted by optimizations~\cite{muchnick,morgan}.
851
852
853\paragraph{Loop peeling.}
854$$
855\sop{while}b\sbin{do}S\mapsto
856\sop{if}b\sbin{then} S; \sop{while} b \sbin{do} S[\ell'_i/\ell_i]
857$$
858where $\ell'_i$ is a fresh label for any $\ell_i$ labelling a statement in $S$.
859This relabelling is safe as to \s{goto}s external to the loop because of the
860single-entry condition. Notice that in case of break and continue statements,
861those should be replaced with \s{goto}s in the peeled body $S$.
862
863\paragraph{Loop unrolling.}
864$$
865\sop{while}b\sbin{do}S\mapsto
866\sop{while} b \sbin{do} (S ;
867  \sop{if} b \sbin{then} (S[\ell^1_i/\ell_i] ;
868  \cdots
869  \sop{if} b \sbin{then} S[\ell^n_i/\ell_i]) \cdots)
870$$
871where $\ell^j_i$ are again fresh labels for any $\ell_i$ labelling a statement
in $S$. This is a deliberately na\"ive version of loop unrolling, which usually
targets less general loops. The problems it poses to CerCo's labelling approach
are independent of the cleverness of the actual transformation.
875
876\section{Labelling: a quick sketch of the previous approach}\label{sec:labelling}
877Plainly labelled \imp{} is obtained adding to the code \emph{cost labels}
878(with metavariables $\alpha,\beta,\ldots$), and cost-labelled statements:
879$$S,T\gramm \cdots \mid \alpha: S$$
880Cost labels allow for some program points to be tracked along the compilation
chain. For further details we refer to~\cite{D2.1}.
882
883With labels the small step semantics turns into a labelled transition
884system and a natural notion of trace (i.e.\ lists of labels) arises.
885Evaluation of statements is enriched with traces, so that rules are like
886$$
887\begin{array}{lblL}
888(\alpha: S, K,s) &\to[\alpha]_P (S,K,s)\\
889(\s{skip}, S \cdot K,s) &\to[\varepsilon]_P (S, K, s)\\
890& \text{etc.}
891\end{array}$$
892
893\paragraph{Labelling.}
Given an \imp{} program $P$, its \emph{labelling} $\alpha:\Ell(P)$ in $\ell\imp$
895is
896defined by putting cost labels after every branching, at the start of both
897branches, and a cost label at the beginning of the program. So the relevant
898cases are
899$$\begin{aligned}
900  \Ell(\sop{if}e\sbin{then}S\sbin{else}T) &=
901    \sop{if}e\sbin{then}\alpha:\Ell(S)\sbin{else}\beta:\Ell(T)\\
902  \Ell(\sop{while}e\sbin{do}S) &=
903    (\sop{while}e\sbin{do}\alpha:\Ell(S));\beta:\s{skip}
904  \end{aligned}$$
905where $\alpha,\beta$ are fresh cost labels, and while in other cases the
906definition just passes to sub-statements.
907
908\paragraph{Labels in the rest of the compilation chain.} All languages further
909down the chain get a new sequential statement $\sop{emit}\alpha$ whose effect is
910to be consumed in a labelled transition while keeping the same state. All other
911instructions guard their operational semantics and do not emit cost labels.
912
913Preservation of semantics throughout the compilation process is restated, in
914rough terms, as
915$$\text{starting state of $P$}\to[\lambda]\!\!^*\;\text{halting state} \iff
916  \text{starting state of $\mathcal C(P)$} \to[\lambda]\!\!^*\;\text{halting state},$$
917where $P$ is a program of a language along the compilation chain, starting and
918halting states depend on the language, and $\mathcal C$ is the
919compilation function\footnote{The case of divergent computations needs
920to be addressed too. Also, the requirement can be weakened by demanding some
921sort of equivalence of the traces rather than equality. Both these issues escape
922the scope of this presentation.}.
923
924\paragraph{Instrumentations}
925Let $\mathcal C$ be the whole compilation from $\ell\imp$ to the labelled
926version of some low-level language $L$. Supposing such compilation has not
927introduced any new loop or branching, we have that
928\begin{itemize}
929 \item every loop contains at least a cost label (\emph{soundness condition}),
930and
931 \item every branching has different labels for the two branches
932  (\emph{preciseness condition}).
933\end{itemize}
934With these two conditions, we have that each and every cost label in
935$\mathcal C(P)$ for any $P$ corresponds to a block of sequential instructions,
to which we can assign a constant \emph{cost}\footnote{This in fact requires the
machine architecture to be simple enough, or for some form of execution analysis
to take place.}. We can therefore assume a \emph{cost mapping} $\kappa_P$ from
cost labels to natural numbers, assigning to each cost label $\alpha$ the cost
of the block containing the single occurrence of $\alpha$.
941
942Given any cost mapping $\kappa$, we can enrich a labelled program so that a
943particular fresh variable (the \emph{cost variable} $c$) keeps track of the
accumulation of costs during the execution. We call this procedure
945\emph{instrumentation} of the program, and it is defined recursively by
946$$
947  \mathcal I(\alpha:S) = c \ass c + \kappa(\alpha) ; \mathcal I(S)
948$$
949while in all other cases the definition passes to substatements.
950
951\paragraph{The problem with loop optimizations.}
952Let us take loop peeling, and apply it to the labelling of a program without any
953adjustment:
954$$
955(\sop{while}e\sbin{do}\alpha:S);\beta:\s{skip}
956\mapsto
957(\sop{if}b\sbin{then} \alpha : S; \sop{while} b \sbin{do} \alpha :
958S[\ell'_i/\ell_i]);
959\beta:\s{skip}$$
960What happens is that the cost label $\alpha$ is duplicated into two distinct
961occurrences. If these two occurrences correspond to different costs in the
962compiled code, the best the cost mapping can do is to take the maximum of the
963two, preserving soundness (i.e.\ the cost estimate still bounds the actual one)
but losing preciseness (i.e.\ the actual cost would be strictly less than its
965estimate).
966
967\section{Indexed labels}\label{sec:indexedlabels}
968This section presents the core of the new approach. In brief points it amounts
969to the following.
970\begin{enumerate}[\bfseries \ref*{sec:indexedlabels}.1.]
971 \item\label{en:outline1}
972Enrich cost labels with formal indexes corresponding, at the beginning of
973the process, to which iteration of the loop they belong to.
974 \item\label{en:outline2}
Each time a loop transformation is applied and a cost label is split into
976different occurrences, each of these will be reindexed so that every time they
977are emitted their position in the original loop will be reconstructed.
978 \item\label{en:outline3}
979Along the compilation chain, add alongside the \s{emit} instruction other
980ones updating the indexes, so that iterations of the original loops can be
981rebuilt at the operational semantics level.
982 \item\label{en:outline4}
983The machinery computing the cost mapping will still work, but assigning
984costs to indexed cost labels, rather than to cost labels as we wish: however
985\emph{dependent costs} can be calculated, where dependency is on which iteration
986of the containing loops we are in.
987\end{enumerate}
988
989\subsection{Indexing the cost labels}\label{ssec:indlabs}
990\paragraph{Formal indexes and $\iota\ell\imp$.}
991Let $i_0,i_1,\ldots$ be a sequence of distinguished fresh identifiers that will
992be used as loop indexes. A \emph{simple expression} is an affine arithmetical
993expression in one of these indexes, that is $a*i_k+b$ with $a,b,k \in \mathbb N$.
Simple expressions $e_1=a_1*i_k+b_1$, $e_2=a_2*i_k+b_2$ in the same index can be
composed, yielding $e_1\circ e_2\ass (a_1a_2)*i_k + (a_1b_2+b_1)$, and this operation
996has an identity element in $id_k \ass 1*i_k+0$. Constants can be expressed as simple
997expressions, so that we identify a natural $c$ with $0*i_k+c$.
998
999An \emph{indexing} (with metavariables $I$, $J$, \ldots) is a list of
1000transformations of successive formal indexes dictated by simple expressions,
1001that is a mapping%
1002\footnote{Here we restrict each mapping to be a simple expression
1003\emph{on the same index}. This might not be the case if more loop optimizations
1004are accounted for (for example, interchanging two nested loops).}
1005$$i_0\mapsto a_0*i_0+b_0,\dots, i_{k-1} \mapsto a_{k-1}*i_{k-1}+b_{k-1}.$$
1006
1007An \emph{indexed cost label} (metavariables $\alphab$, $\betab$, \ldots) is
1008the combination of a cost label $\alpha$ and an indexing $I$, written
1009$\alpha\la I\ra$. The cost label underlying an indexed one is called its
1010\emph{atom}. All plain labels can be considered as indexed ones by taking
1011an empty indexing.
1012
1013\imp{} with indexed labels ($\iota\ell\imp$) is defined by adding to $\imp$
1014statements with indexed labels, and by having loops with formal indexes
1015attached to them:
1016$$S,T,\ldots \gramm \cdots i_k:\sop{while}e\sbin{do}S\mid \alphab : S.$$
Notice that unindexed loops are still in the language: they will correspond
1018to multi-entry loops which are ignored by indexing and optimizations.
1019We will discuss the semantics later.
1020
1021\paragraph{Indexed labelling.}
1022Given an $\imp$ program $P$, in order to index loops and assign indexed labels
1023we must first of all distinguish single-entry loops. We just sketch how it can
1024be computed.
1025
1026A first pass of the program $P$ can easily compute two maps: $\s{loopof}_P$
1027from each label $\ell$ to the occurrence (i.e.\ the path) of a $\s{while}$ loop
1028containing $\ell$, or the empty path if none exists; and $\s{gotosof}_P$ from
1029a label $\ell$ to the occurrences of \s{goto}s pointing to it. Then the set
$\s{multyentry}_P$ of multi-entry loops of $P$ can be computed by adding to it
1031all occurrences $p$ such that there exists a label $\ell$ and an occurrence
1032$q$ with $\s{loopof}_P(\ell)=p$, $q\in \s{gotosof}_P(\ell)$ and $p\not\le q$
1033(where $\le$ is the prefix relation)\footnote{Possible simplification to this
1034procedure include keeping track just of while loops containing labels and
1035\s{goto}s (rather than paths in the syntactic tree of the program), and making
1036two passes while avoiding building the map to sets $\s{gotosof}$}.
1037
1038Let $Id_k$ be the indexing of length $k$ made from identity simple expressions,
1039i.e.\ the sequence $i_0\mapsto id_0, \ldots , i_{k-1}\mapsto id_{k-1}$. We define the tiered indexed labelling
1040$\Ell^\iota_P (S,k)$ in program $P$ for occurrence $S$ of a statement in $P$
1041and a natural $k$ by recursion, setting
1042$$
1043\Ell^\iota_P(S,k)\ass
1044\left\{
1045\begin{array}{lh{-100pt}l}
1046 (i_k:\sop{while}b\sbin{do}\alpha\la Id_{k+1}\ra : \Ell^\iota_P(T,k+1));\beta\la Id_k \ra : \s{skip}
1047\\& \text{if $S=\sop{while}b\sbin{do}T$ and $S\notin \s{multyentry}_P$,}\\[3pt]
1048(\sop{while}b\sbin{do}\alpha\la Id_k \ra : \Ell^\iota_P(T,k));\beta\la Id_k \ra : \s{skip}
1049\\& \text{otherwise, if $S=\sop{while}b\sbin{do}T$,}\\[3pt]
1050\sop{if}b\sbin{then} \alpha\la Id_k \ra : \Ell^\iota_P(T_1,k) \sbin{else} \beta\la Id_k \ra : \Ell^\iota_P(T_2,k)
1051\\&\text{if $S=\sop{if}b\sbin{then}T_1\sbin{else}T_2$,}\\[3pt]
1052\ldots
1053\end{array}
1054\right.
1055$$
1056where as usual $\alpha$ and $\beta$ are to be fresh cost labels, and other cases just keep
1057making the recursive calls on the substatements. The \emph{indexed labelling} of
1058a program $P$ is then defined as $\alpha\la \ra : \Ell^\iota_P(P,0)$, i.e.\ a
1059further fresh unindexed cost label is added at the start, and we start from level $0$.
1060
1061In other words: each single-entry loop is indexed by $i_k$ where $k$ is the number of
1062other single-entry loops containing this one, and all cost labels under the scope
1063of a single-entry loop indexed by $i_k$ are indexed by all indexes $i_0,\ldots,i_k$,
1064without any transformation.
1065
1066\subsection{Indexed labels and loop transformations}
1067We define the \emph{reindexing} $I \circ (i_k\mapsto a*i_k+b)$ as an operator
1068on indexings by setting
1069\begin{multline*}
1070(i_0\mapsto e_0,\ldots, i_k \mapsto e_k,\ldots,i_n\mapsto e_n)
1071\circ(i_k\mapsto a*i_k+b)
1072\ass\\
1073i_0\mapsto e_0,\ldots, i_k \mapsto e_k \circ(a*i_k+b),\ldots,i_n\mapsto e_n,
1074\end{multline*}
1075and extending then to indexed labels (by $\alpha\la I\ra\circ(i_k\mapsto e)\ass
1076\alpha\la I\circ (i_k\mapsto e)\ra$) and to statements in $\iota\ell\imp$
1077(by applying the above transformation to all indexed labels).
1078
1079We can then redefine loop peeling and loop unrolling taking into account indexed labels.
1080It will be possible to apply the transformation only to indexed loops, that is loops
1081that are single-entry. The attentive reader will notice that no assumption is
1082made on the labelling of the statements involved. In particular the transformation
1083can be repeated and composed at will. Also, notice that erasing all labelling
1084information (i.e.\ indexed cost labels and loop indexes) we recover exactly the
1085same transformations presented in \autoref{sec:defimp}.
1086
1087\paragraph{Indexed loop peeling.}
1088
1089$$
1090i_k:\sop{while}b\sbin{do}S\mapsto
1091\sop{if}b\sbin{then} S\circ (i_k\mapsto 0); i_k : \sop{while} b \sbin{do} S[\ell'_i/\ell_i]\circ(i_k\mapsto i_k + 1)
1092$$
1093As can be expected, the peeled iteration of the loop gets reindexed as always
1094being the first iteration of the loop, while the iterations of the remaining
1095loop get shifted by $1$.
1096
1097\paragraph{Indexed loop unrolling.}
1098$$
1099\begin{array}{l}
1100\begin{array}{ncn}
1101i_k:\sop{while}b\sbin{do}S\\
1102\tikz\node[rotate=-90,inner sep=0pt]{$\mapsto$};
1103\end{array}\\
1104i_k:\sop{while} b \sbin{do}\\
1105\quad (S\circ(i_k\mapsto n*i_k) ;\\
1106\quad \sop{if} b \sbin{then}\\
1107\quad\quad (S[\ell^1_i/\ell_i]\circ(i_k\mapsto n*i_k+1) ;\\
1108\quad\quad\quad \vdots \\
1109\quad\quad \quad \sop{if} b \sbin{then}\\
1110\quad \quad \quad \quad S[\ell^n_i/\ell_i]\circ(i_k\mapsto n*i_k+n-1)
1111)\cdots )
1112\end{array}
1113$$
1114Again, the reindexing is as can be expected: each copy of the unrolled body
1115has its indexes remapped so that when they are executed the original iteration
1116of the loop to which they correspond can be recovered.
1117
1118\subsection{Semantics and compilation of indexed labels}
1119
1120In order to make sense of loop indexes, one must keep track of their values
1121in the state. A \emph{constant indexing} (metavariables $C,\ldots$) is an
1122indexing which employs only constant simple expressions. The evaluation
1123of an indexing $I$ in a constant indexing $C$, noted $I|_C$, is defined
1124by
$$I|_{i_0\mapsto c_0,\ldots, i_{k-1}\mapsto c_{k-1}} \ass
  I\circ(i_0\mapsto c_0)\circ\cdots\circ(i_{k-1}\mapsto c_{k-1})$$
1127(using the definition of ${-}\circ{-}$ given in \autoref{ssec:indlabs}), considering it defined only
if the resulting indexing is a constant one too\footnote{For example
1129$(i_0\mapsto 2*i_0,i_1\mapsto i_1+1)|_{i_0\mapsto 2}$ is undefined,
1130but $(i_0\mapsto 2*i_0,i_1\mapsto 0)|_{i_0\mapsto 2}=
1131i_0\mapsto 4,i_1\mapsto 2$, which is indeed a constant indexing,
1132even if the domain of the original indexing is not covered by the constant one.}.
1133The definition is extended to indexed labels by $\alpha\la I\ra|_C\ass \alpha\la I|_C\ra$.
1134
1135Constant indexings will be used to keep track of the exact iterations of the
1136original code the emitted labels belong to. We thus define two basic actions to
1137update constant indexings: $C[i_k{\uparrow}]$ which increments the value of
1138$i_k$ by one, and $C[i_k{\downarrow}0]$ which resets it to $0$.
1139
1140We are ready to update the definition of the operational semantics of
1141indexed labelled \imp. The emitted cost labels will now be ones indexed by
1142constant indexings. We add loop index increments as constructors to continuations%
\footnote{This is unneeded if we keep track of active loops (as is necessary
1144in the presence of \s{continue} and \s{break} statements).}:
1145$$K,\ldots  \gramm \cdots | i_k{\uparrow} \cdot K$$
1146The state will now be a 4-uple
1147$(S,K,s,C)$ which adds a constant indexing to the 3-uple of regular
1148semantics. The small-step rules for all statements but the
1149cost-labelled ones and the indexed loops remain the same, without
1150touching the $C$ parameter (in particular unindexed loops behave the same
1151as usual). The remaining cases are:
1152$$\begin{aligned}
1153   (\alphab : S,K,s,C) &\to[\alphab|_C]_P (S,K,s,C)\\
   (i_k:\sop{while}b\sbin{do}S,K,s,C) &\to[\varepsilon]_P
1155    \begin{cases}
1156     (S,i_k{\uparrow} \cdot \sop{while}b\sbin{do}S\cdot K,s,C[i_k{\downarrow}0])
1157      & \text{if $(b,s)\eval v\neq 0$,}\\
1158     (\s{skip}, K, s, C) & \text{otherwise}
1159    \end{cases}\\
1160   (\s{skip},i_k{\uparrow} \cdot K,s,C) &\to[\varepsilon]_P (\s{skip}, K, s, C[i_k{\uparrow}])
1161  \end{aligned}$$
1162The starting state with store $s$ for a program $P$ becomes then
$(P,\s{halt},s,(i_0\mapsto 0,\dots,i_{n-1}\mapsto 0))$ where $i_0,\ldots,i_{n-1}$ cover
1164all loop indexes of $P$\footnote{For a program which is the indexed labelling of an
1165\imp{} one this corresponds to the maximum nesting of single-entry loops. We can also
1166avoid computing this value in advance if we define $C[i{\downarrow}0]$ to extend
1167$C$'s domain as needed, so that the starting constant indexing can be the empty one.}.
1168
1169\paragraph{Compilation.}
1170Further down the compilation chain the loop
1171structure is usually partially or completely lost. We cannot rely on it anymore
1172to ensure keeping track of original source code iterations. We therefore add
1173alongside the \s{emit} instruction two other sequential instructions
$\sop{ind\_reset}k$ and $\sop{ind\_inc}k$ whose sole effect is to reset to
11750 (resp.\ increment by 1) the loop index $i_k$, as kept track of in a constant
1176indexing accompanying the state.
1177
1178The first step of compilation from $\iota\ell\imp$ consists in prefixing the
1179translation of an indexed loop $i_k:\s{while}\ b\ \s{do}\ S$ with
$\sop{ind\_reset}k$ and postfixing the translation of its body $S$ with
$\sop{ind\_inc}k$. Later on in the compilation chain we just need to propagate
1182the instructions dealing with cost labels.
1183
1184We would like to stress the fact that this machinery is only needed to give a
1185suitable semantics of observables on which preservation proofs can be done. By no
1186means the added instructions and the constant indexing in the state are meant
1187to change the actual (let us say denotational) semantics of the programs. In this
1188regard the two new instruction have a similar role as the \s{emit} one. A
1189forgetful mapping of everything (syntax, states, operational semantics rules)
1190can be defined erasing all occurrences of cost labels and loop indexes, and the
1191result will always be a regular version of the language considered.
1192
1193\paragraph{Stating the preservation of semantics.} In fact, the statement of preservation
1194of semantics does not change at all, if not for considering traces of evaluated
1195indexed cost labels rather than traces of plain ones.
1196
1197
1198\subsection{Dependent costs in the source code}\label{ssec:depcosts}
1199The task of producing dependent costs out of the constant costs of indexed labels
1200is quite technical. Before presenting it here, we would like to point out that
1201the annotations produced by the procedure described in this subsection, even
1202if correct, can be enormous and unreadable. In \autoref{sec:conc}, when we will
1203detail the actual implementation, we will also sketch how we mitigated this
1204problem.
1205
1206Having the result of compiling the indexed labelling $\Ell^\iota(P)$ of an \imp{}
1207program $P$, we can still suppose that a cost mapping can be computed, but
1208from indexed labels to naturals. We want to annotate the source code, so we need
1209a way to express and compute costs of cost labels, i.e.\ group the costs of
1210indexed labels to ones of their atoms. In order to do so we introduce
1211\emph{dependent costs}. Let us suppose that for the sole purpose of annotation,
1212we have available in the language ternary expressions of the form
1213$$\tern e {f_1}{f_2},$$
1214and that we have access to common operators on integers such as equality, order
1215and modulus.
1216
1217\paragraph{Simple conditions.}
1218First, we need to shift from \emph{transformations} of loop indexes to
1219\emph{conditions} on them. We identify a set of conditions on natural numbers
1220which are able to express the image of any composition of simple expressions.
1221
1222\emph{Simple conditions} are of three possible forms:
1223\begin{itemize}
1224 \item equality $i_k=n$ for some natural $n$;
1225 \item inequality $i_k\ge n$ for some natural $n$;
1226 \item modular equality together with inequality $i_k\bmod a = b\wedge i_k\ge n$
1227  for naturals $a, b, n$.
1228\end{itemize}
1229The always true simple condition is given by $i_k\ge 0$, and similarly we
1230write $i_k\bmod a = b$ as a simple condition for $i_k\bmod a = b\wedge i_k\ge 0$.
1231Given a simple condition $p$ and a constant indexing $C$ we can easily define
1232when $p$ holds for $C$ (written $p\circ C$). A \emph{dependent cost expression}
1233is an expression built solely out of integer constants and ternary expressions
1234with simple conditions at their head. Given a dependent cost expression $e$ where
1235all of the loop indexes appearing in it are in the domain of a constant indexing
1236$C$, we can define the value $e\circ C\in \mathbb N$ by
1237$$n\circ C\ass n,\qquad (\tern p e f)\circ C\ass
1238\begin{cases}
1239  e\circ C& \text{if $p\circ C$,}\\
1240  f\circ C& \text{otherwise.}
1241\end{cases}$$
1242
1243\paragraph{From indexed costs to dependent ones.}
1244Every simple expression $e$ corresponds to a simple condition $p(e)$ which expresses the
1245set of values that can be the result of it. Following is the definition of such
1246relation. We recall that in this development loop indexes are always mapped to
1247simple expressions over the same index. If it was not the case, the condition
1248obtained from an expression should be on the mapped index, not the indeterminate
1249of the simple expression. We leave to further work all generalizations of what
1250we present here.
1251$$
1252p(a*i_k+b)\ass
1253\begin{cases}
1254i_k = b & \text{if $a = 0$,}\\
1255i_k \ge b & \text{if $a = 1$,}\\
1256i_k\bmod a = b \wedge i_k \ge b & \text{otherwise}.
1257\end{cases}$$
1258Now, suppose we are given a mapping $\kappa$ from indexed labels to natural
1259numbers. We will transform it in a mapping (identified with an abuse of notation
1260with the same symbol $\kappa$) from atoms to \imp{} expressions built with
1261ternary expressions which depend solely on loop indexes. To that end we define
1262an auxiliary function $\kappa^\alpha_L$ parameterized by atoms and words of
1263simple expressions and defined on \emph{sets} of $n$-uples of simple expressions
1264(with $n$ constant across each such set, i.e.\ each set is made of words with
1265the same length).
1266
1267We will employ a bijection between words of simple expressions and indexings,
given by\footnote{Lists of simple expressions are in fact how indexings are
represented in CerCo's current implementation of the compiler.}
1270$$i_0\mapsto e_0,\ldots,i_{k-1}\mapsto e_{k-1} \cong e_0\cdots e_{k-1}.$$
1271As usual, $\varepsilon$ denotes the empty word/indexing, and juxtaposition
1272word concatenation.
1273
For every set $S$ of $n$-uples of simple expressions, we are in one of the following
1275three exclusive cases:
1276\begin{itemize}
1277 \item $S=\emptyset$, or
1278 \item $S=\{\varepsilon\}$, or
1279 \item there is a simple expression $e$ such that $S$ can be decomposed in
1280  $eS'+S''$, with $S'\neq \emptyset$ and none of the words in $S''$ starting with $e$
1281\end{itemize}
1282where $eS'$ denotes prepending $e$ to all elements of $S'$ and $+$ is disjoint
1283union. This classification can serve as the basis of a definition by recursion
1284on $n+\card S$ where $n$ is the size of tuples in $S$ and $\card S$ is its cardinality.
1285Indeed in the third case in $S'$ the size of tuples decreases strictly (and
1286cardinality does not increase) while for $S''$ the size of tuples remains the same
1287but cardinality strictly decreases. The expression $e$ of the third case will be chosen
1288as minimal for some total order\footnote{The specific order used does not change
1289the correctness of the procedure, but different orders can give more or less
1290readable results. A ``good'' order is the lexicographic one, with $a*i_k+b \le a'*i_k+b'$
1291if $a<a'$ or $a=a'$ and $b\le b'$.}.
1292
1293Following is the definition of the auxiliary function $\kappa^\alpha_L$, following
1294the recursion scheme presented above.
1295$$
1296\begin{aligned}
1297\kappa^\alpha_L(\emptyset) &\ass 0\\
1298\kappa^\alpha_L(\{\varepsilon\}) &\ass \kappa(\alpha\la L\ra) \\
1299\kappa^\alpha_L(eS'+S'') &\ass \tern{p(e)}{\kappa^\alpha_{Le}(S')}{\kappa^\alpha_L(S'')}
1300\end{aligned}
1301$$
1302
1303Finally, the wanted dependent cost mapping is defined by
1304$$\kappa(\alpha)\ass\kappa^\alpha_\varepsilon(\{\,L\mid \alpha\la L\ra \text{ appears
1305in the compiled code}\,\}).$$
1306
1307\paragraph{Indexed instrumentation.}
1308The \emph{indexed instrumentation} generalizes the instrumentation presented in
1309\autoref{sec:labelling}.
1310We described
1311above how cost atoms can be mapped to dependent costs. The instrumentation must
1312also insert code dealing with the loop indexes. As instrumentation is done
1313on the code produced by the labelling phase, all cost labels are indexed by
1314identity indexings. The relevant cases of the recursive definition
1315(supposing $c$ is the cost variable) are then
1316$$
1317\begin{aligned}
1318\mathcal I^\iota(\alpha\la Id_k\ra:S) &= c\ass c + \kappa(\alpha);\mathcal I^\iota(S)\\
1319\mathcal I^\iota(i_k : \sop{while}b\sbin{do}S) &=
1320  i_k \ass 0; \sop{while}b\sbin{do}(\mathcal I^\iota (S); i_k \ass i_k + 1)
1321\end{aligned}
1322$$
1323
1324\section{Notes on the implementation and further work}\label{sec:conc}
Implementing the indexed label approach in CerCo's untrusted OCaml prototype
1326does not introduce many aspects beyond what has already been presented for the toy
1327language \imp{} with \s{goto}s. \s{Clight}, the fragment of \s{C} which is the
source language of CerCo's compilation chain~\cite{D2.1}, has several more features,
1329but few demand changes in the indexed labelled approach.
1330\paragraph{Indexed loops vs index update instructions.} In this presentation
1331we had indexed loops in $\iota\ell\imp$, while we hinted at the fact that later
1332languages in the compilation chain would have specific index update instructions.
1333In CerCo's actual compilation chain from \s{Clight} to 8051 assembly, indexed
1334loops are only in \s{Clight}, while from \s{Cminor} onward all languages have
1335the same three cost-involving instructions: label emitting, index resetting and
1336index incrementing.
1337\paragraph{Loop transformations in the front end.} We decided to implement
1338the two loop transformations in the front end, namely in \s{Clight}. This
1339decision is due to user readability concerns: if costs are to be presented to
1340the programmer, they should depend on structures written by the programmer
himself. If loop transformations were performed later it would be harder to
1342relate information on loops in the control flow graph with actual loops
1343written in the source code. Another solution would be however to index
1344loops in the source code and then use these indexes later in the compilation
1345chain to pinpoint explicit loops of the source code: loop indexes can be used
1346to preserve such information just like cost labels.
1347\paragraph{Break and continue statements.} \s{Clight}'s loop flow control
1348statements for breaking and continuing a loop are equivalent to appropriate
1349\s{goto} statements. The only difference is that we are assured that they cannot
1350cause loops to be multi-entry, and that when a transformation such as loop
1351peeling is done, they need to be replaced by actual \s{goto}s (which will happen
1352further down the compilation chain anyway).
1353\paragraph{Function calls.} Every internal function definition has its own
1354space of loop indexes. Executable semantics must thus take into account saving
1355and resetting the constant indexing of current loops upon hitting a function
1356call, and restoring it upon return of control. A peculiarity is that this cannot
1357be attached to actions saving and restoring frames: namely in the case of
1358tail calls the constant indexing needs to be saved whereas the frame does not.
1359\paragraph{Cost-labelled expressions.} In labelled \s{Clight} expressions get
1360cost labels too, because of the presence of ternary conditional expressions
1361(and lazy logical operators, which get translated to ternary expressions too).
1362Adapting the indexed labelled approach to cost-labelled expressions does not
1363pose particular problems.
1364\paragraph{Simplification of dependent costs.}
1365As already mentioned, the blind application of the procedure described in
1366\autoref{ssec:depcosts} produces unwieldy cost annotations. In the implementation
1367several transformations are used to simplify such dependent costs.
1368
1369Disjunctions of simple conditions are closed under all logical operations, and
1370it can be computed whether such a disjunction implies a simple condition
1371or its negation. This can be used to eliminate useless branches of dependent costs,
to merge branches that have the same value, and possibly to simplify the simple
conditions themselves (as in the third case below). Examples of the three
transformations are, respectively,
1374\begin{itemize}
1375\item $
1376\verb+(_i_0 == 0)?+x\verb+:(_i_0 >= 1)?+y\verb+:+z
1377\mapsto
1378\verb+(_i_0 == 0)?+x\verb+:+y,
1379$
1380\item $
1381c\texttt{?}x\verb+:(+d\texttt{?}x\texttt{:}y\verb+)+
1382\mapsto
1383\texttt{(}c\texttt{ || }d\texttt{)?}x\texttt{:}y,
1384$
1385\item \begin{tabular}[t]{np{\linewidth}n}
1386$\verb+(_i_0 == 0)?+x\verb+:(_i_0 % 2 == 0 && _i_0 >= 2)?+y\verb+:+z
1387\mapsto$ \\\hfill
1388$\verb+(_i_0 == 0)?+x\verb+:(_i_0 % 2 == 0)?+y\verb+:+z.
1389$\end{tabular}
1390\end{itemize}
1391The second transformation tends to accumulate disjunctions, again to the detriment
1392of readability. A further transformation swaps two branches of the ternary
expression if the negation of the condition can be expressed with fewer clauses.
1394An example is
1395$$ \verb+(_i_0 % 3 == 0 || _i_0 % 3 == 1)?+x\verb+:+y \mapsto
1396\verb+(_i_0 % 3 == 2)?+y\verb+:+x.
1397$$
1398
\paragraph{Updates to the Frama-C cost plugin.}
CerCo's Frama-C~\cite{framac} cost plugin\todo{is there a reference for this?}{}
has been updated to take into account dependent
costs. The Frama-C framework explodes ternary expressions into actual branch
statements, introducing temporaries along the way, which makes the task of
analyzing ternary cost expressions rather daunting. It was deemed necessary to provide
an option in the compiler to use actual branch statements for cost annotations
rather than ternary expressions, so that at least Frama-C's use of temporaries in
cost annotations is avoided. The cost analysis carried out by the plugin now
takes into account such dependent costs.
1409
1410The only limitation (which provided
1411a simplification in the code) is that within a dependent cost
1412simple conditions with modulus on the
1413same loop index should not be modulo different numbers, which corresponds to
the reasonable limitation of not applying loop unrolling multiple times to
the same loop.
1416\paragraph{Further work.}
Indexed labels are for now implemented only in the untrusted OCaml compiler,
1418while they are not present yet in the Matita code. Porting them should pose no
1419significant problem, and then the proof effort should begin.
1420
1421Because most of the executable operational semantics of the languages across the
1422front end and the back end are oblivious to cost labels, it should be expected
1423that the bulk of the semantic preservation proofs that still needs to be done
will not get any harder because of indexed labels. The only tricky point
1425would be in the translation of \s{Clight} to \s{Cminor}, where we
1426pass from structured indexed loops to atomic instructions on loop indexes.
1427
1428An invariant which should probably be proved and provably preserved along compilation
1429is the non-overlap of indexings for the same atom. Then, supposing cost
1430correctness for the unindexed approach, the indexed one will just need to
1431add the proof that
1432$$\forall C\text{ constant indexing}.\forall \alpha\la I\ra\text{ appearing in the compiled code}.
1433  \kappa(\alpha)\circ (I\circ C) = \kappa(\alpha\la I \ra).$$
1434$C$ represents a snapshot of loop indexes in the compiled code, while
1435$I\circ C$ is the corresponding snapshot in the source code.
1436
Apart from carrying over the proofs, we would like to extend the approach
1438to more loop transformations. Important examples are loop inversion
1439(where a for loop is reversed, usually to make iterations appear as truly
1440independent) or loop interchange (where two nested loops are swapped, usually
1441to have more loop invariants or to enhance strength reduction). This introduces
1442interesting changes to the approach, where we would have indexings such as
1443$$i_0\mapsto n - i_0\quad\text{or}\quad i_0\mapsto i_1, i_1\mapsto i_0.$$
In particular, dependency on actual variables of the code would enter the
picture, as indexings would depend on the number of iterations of a well-behaved
1446guarded loop (the $n$ in the first example).
1447
1448%
1449% \newpage
1450%
1451% \includepdf[pages={-}]{plugin.pdf}
1452%
1453%
1454% \newpage
1455%
1456% \includepdf[pages={-}]{fopara.pdf}
1457%
1458%
1459% \newpage
1460%
1461% \includepdf[pages={-}]{tlca.pdf}
1462%
1463% \bibliographystyle{plain}
1464\bibliography{bib}
1465
1466\end{document}
Note: See TracBrowser for help on using the repository browser.