\documentclass[11pt,epsf,a4wide]{article}
\usepackage{../../style/cerco}
\usepackage{pdfpages}

\usepackage{graphics}

% For LLNCS, comment the above and use
% \documentclass{llncs}

\RequirePackage[latin1]{inputenc}

% Packages and functions that we use
\usepackage[english]{babel}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{xspace}
\usepackage{latexsym}
\usepackage{url}
\usepackage{xspace}
%\usepackage{fancyvrb}
%\usepackage[all]{xy}
% Packages for LNCS
%\usepackage{semantic}
%\usepackage{cmll}
% Packages for RR
\usepackage{graphics,color}
\RequirePackage[latin1]{inputenc}
\usepackage{array}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newenvironment{comment}{{\bf MORE WORK:}}{}

\newenvironment{restate-proposition}[2][{}]{\noindent\textbf{Proposition~{#2}}
\;\textbf{#1}\ 
}{\vskip 1em}

\newenvironment{restate-theorem}[2][{}]{\noindent\textbf{Theorem~{#2}}\;\textbf{
#1}\ 
}{\vskip 1em}

\newenvironment{restate-corollary}[2][{}]{\noindent\textbf{Corollary~{#2}}
\;\textbf{#1}\ 
}{\vskip 1em}

\newcommand{\myparagraph}[1]{\medskip\noindent\textbf{#1}}

\newcommand{\Proofitemb}[1]{\medskip \noindent {\bf #1\;}}
\newcommand{\Proofitemfb}[1]{\noindent {\bf #1\;}}
\newcommand{\Proofitem}[1]{\medskip \noindent $#1\;$}
\newcommand{\Proofitemf}[1]{\noindent $#1\;$}
\newcommand{\Defitem}[1]{\smallskip \noindent $#1\;$}
\newcommand{\Defitemt}[1]{\smallskip \noindent {\em #1\;}}
\newcommand{\Defitemf}[1]{\noindent $#1\;$}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newcommand{\eqdef}{=_{\text{def}}}
\newcommand{\concat}{\cdot}%%{\mathbin{+}}
\newcommand{\Int}{\mathit{int}}
\newcommand{\nat}{\mathit{nat}}
\newcommand{\String}{\mathit{string}}
\newcommand{\Ident}{\mathit{ident}}
\newcommand{\Block}{\mathit{block}}
\newcommand{\Signature}{\mathit{signature}}

\newcommand{\pc}{\mathit{pc}}
\newcommand{\estack}{\mathit{estack}}
\newcommand{\Error}{\epsilon}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% --------------------------------------------------------------------- %
% Proof rule.                                                           %
% --------------------------------------------------------------------- %

\newcommand{\staterule}[3]{%
  $\begin{array}{@{}l}%
   \mbox{#1}\\%
   \begin{array}{c}
   #2\\
   \hline
   \raisebox{0ex}[2.5ex]{\strut}#3%
   \end{array}
  \end{array}$}

\newcommand{\GAP}{2ex}

\newcommand{\recall}[2]{%
 $\begin{array}{c}
 #1\\
 \hline
 \raisebox{0ex}[2.5ex]{\strut}#2%
 \end{array}$}

\newcommand{\hbra}{\noindent\hbox to \textwidth{\leaders\hrule height1.8mm
depth-1.5mm\hfill}}
\newcommand{\hket}{\noindent\hbox to \textwidth{\leaders\hrule
height0.3mm\hfill}}
\newcommand{\ratio}{.3}

\newenvironment{display}[1]{\begin{tabbing}
  \hspace{1.5em} \= \hspace{\ratio\linewidth-1.5em} \= \hspace{1.5em} \= \kill
  \noindent\hbra\\[-.5em]
  {\ }\textsc{#1}\\[-.8ex]
  \hbox to \textwidth{\leaders\hrule height1.6mm depth-1.5mm\hfill}\\[-.8ex]
  }{\\[-.8ex]\hket
  \end{tabbing}}


\newcommand{\sbline}{\hfill\smash[t]{\rule[1.5em]{\textwidth}{0.2ex}
\hfill\hspace*{0ex}}}
\newcommand{\sline}{\hfill\smash[t]{\rule[1.5em]{\textwidth}{0.1ex}
\hfill\hspace*{0ex}}}
\newcommand{\sentry}[2]{\>$#1$\>\ \smash[t]{\vrule width 0.2mm height
    1.2\baselineskip depth 1.5\baselineskip}\>#2}

\newcommand{\entry}[2]{\>$#1$\>\>#2}
\newcommand{\clause}[2]{$#1$\>\>#2}
\newcommand{\category}[2]{\clause{#1::=}{#2}}
\newcommand{\subclause}[1]{\>\>\>#1}
\newcommand{\redrule}[3]{$#1$\>\>$#2$\>\>\>#3}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% environments

% To be commented for LNCS
\newtheorem{theorem}{Theorem}
\newtheorem{fact}[theorem]{Fact}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{question}[theorem]{Question}
\newtheorem{proviso}[theorem]{Proviso}
\newtheorem{conjecture}[theorem]{Conjecture}

% proofs

\newcommand{\Proof}{\noindent {\sc Proof}. }
\newcommand{\Proofhint}{\noindent {\sc Proof hint}. }
% To be commented for LNCS
\newcommand{\qed}{\hfill${\Box}$}
\newcommand{\EndProof}{\qed}

% figure environment

\newcommand{\Figbar}{{\center \rule{\hsize}{0.3mm}}}
% horizontal thinner line for figures
\newenvironment{figureplr}[1][t]{\begin{figure}[#1] \Figbar}{\Figbar \end{figure}}
% environment for figures
%       ************Macros for mathematical symbols*************
% Style

\newcommand{\cl}[1]{{\cal #1}}          % \cl{R} to make R calligraphic
\newcommand{\la}{\langle}               % the brackets for pairing (see also \pair)
\newcommand{\ra}{\rangle}

\newcommand{\lf}{\lfloor}
\newcommand{\rf}{\rfloor}
\newcommand{\ul}[1]{\underline{#1}}     % to underline
\newcommand{\ol}[1]{\overline{#1}}      % to overline
\newcommand{\ok}{~ok}                   % well formed context

% Syntax

\newcommand{\Gives}{\vdash}             % in a type judgment
\newcommand{\IGives}{\vdash_{I}}        % intuitionistic provability
\newcommand{\AIGives}{\vdash_{{\it AI}}} % affine-intuitionistic provability
\newcommand{\CGives}{\vdash_{C}}        % affine-intuitionistic confluent provability

\newcommand{\Models}{\mid \! =}         % models

\newcommand{\emptycxt}{\On}             % empty context
\newcommand{\subs}[2]{[#1 / #2]}
\newcommand{\sub}[2]{[#2 / #1]}         % substitution \sub{x}{U} gives [U/x]

\newcommand{\Sub}[3]{[#3 / #2]#1}       % Substitution with three arguments \Sub{V}{x}{U}

\newcommand{\lsub}[2]{#2 / #1}          % substitution \lsub{x}{U} gives U/x, to be used in a list

\newcommand{\impl}{\supset}
\newcommand{\arrow}{\rightarrow}        % right thin arrow
\newcommand{\trarrow}{\stackrel{*}{\rightarrow}}        % trans closure
%\newcommand{\limp}{\makebox[5mm]{\,$- \! {\circ}\,$}}   % linear implication
\newcommand{\limp}{\multimap} % linear implication
\newcommand{\bang}{\, !}
% LNCS
%\newcommand{\bang}{\oc}
\newcommand{\limpe}[1]{\stackrel{#1}{\multimap}}
\newcommand{\hyp}[3]{#1:(#2, #3)}
\newcommand{\letm}[3]{{\sf let} \ ! #1 = #2 \ {\sf in} \ #3}    % modal let
\newcommand{\lets}[3]{{\sf let} \ #1 = #2 \ {\sf in} \ #3}    % simple let
\newcommand{\letp}[3]{{\sf let} \ \S #1 = #2 \ {\sf in} \ #3}    % paragraph let
\newcommand{\tertype}{{\bf 1}}
\newcommand{\behtype}{{\bf B}}
\newcommand{\bt}[1]{{\it BT}(#1)}       % Boehm tree
\newcommand{\cxt}[1]{#1[~]}             % context with one hole
\newcommand{\pr}{\parallel}             % parallel ||
\newcommand{\Nat}{\mathbf{N}}                 % natural numbers
\newcommand{\Natmax}{\mathbf{N}_{{\it max}}}  % natural numbers with minus infinity
\newcommand{\Rat}{\mathbf{Q}^{+}}                 % non-negative rationals
\newcommand{\Ratmax}{\mathbf{Q}^{+}_{{\it max}}}  % non-negative rationals with minus infinity
\newcommand{\Alt}{ \mid\!\!\mid  }
\newcommand{\isum}{\oplus}
\newcommand{\csum}{\uplus}              % context sum
\newcommand{\dpar}{\mid\!\mid}          % for the production of a grammar containing \mid
\newcommand{\infer}[2]{\begin{array}{c} #1 \\ \hline #2 \end{array}}
                                        % to make a centered inference rule

% (Meta-)Logic

\newcommand{\bool}{{\sf bool}}          % boolean values
\newcommand{\Or}{\vee}                  % disjunction
\newcommand{\OR}{\bigvee}               % big disjunction
\newcommand{\AND}{\wedge}               % conjunction
\newcommand{\ANDD}{\bigwedge}           % big conjunction
\newcommand{\Arrow}{\Rightarrow}        % right double arrow
\newcommand{\IFF}{\mbox{~~iff~~}}       % iff in roman and with spaces
\newcommand{\iffArrow}{\Leftrightarrow} % logical equivalence

% Semantics

\newcommand{\dl}{[\![}                  % semantic [[
\newcommand{\dr}{]\!]}                  % semantic ]]
\newcommand{\lam}{{\bf \lambda}}        % semantic lambda

% The equivalences for this paper

% the usual ones
\newcommand{\ubis}{\approx^u}          % usual labelled weak bis
\newcommand{\uabis}{\approx^{u}_{ccs}} % usual labelled weak bis on CCS

% the contextual conv sensitive
\newcommand{\cbis}{\approx}        % convergence sensitive bis
\newcommand{\cabis}{\approx_{ccs}}  % convergence sensitive bis on CCS

% the labelled conv sensitive
\newcommand{\lcbis}{\approx^{\ell}} %
\newcommand{\lcabis}{\approx^{\ell}_{ccs}} % labelled convergence sensitive bis on CCS
\newcommand{\lcbiswrong}{\approx^{\ell \Downarrow}} %

\newcommand{\maytest}{=_{\Downarrow}}
\newcommand{\musttest}{=_{\Downarrow_{S}}}

% Sets

\newcommand{\prt}[1]{{\cal P}(#1)}      % Parts of a set
\newcommand{\finprt}[1]{{\cal P}_{fin}(#1)}% Finite parts
\newcommand{\finprtp}[1]{{\cal P}_{fin}^{+}(#1)}% Non-empty Finite parts
\newcommand{\union}{\cup}               % union
\newcommand{\inter}{\cap}               % intersection
\newcommand{\Union}{\bigcup}            % big union
\newcommand{\Inter}{\bigcap}            % big intersection
\newcommand{\cpl}[1]{#1^{c}}            % complement
\newcommand{\card}{\sharp}              % cardinality
\newcommand{\minus}{\backslash}         % set difference
\newcommand{\sequence}[2]{\{#1\}_{#2}}  % ex. \sequence{d_n}{n\in \omega}
\newcommand{\comp}{\circ}               % functional composition
%\newcommand{\oset}[1]{\{#1\}}            % set enumeration
\newcommand{\mset}[1]{\{\! | #1 |\!\}}  % pseudo-set notation {| |}

% Domains

\newcommand{\two}{{\bf O}}              % Sierpinski space
\newcommand{\join}{\vee}                % join
\newcommand{\JOIN}{\bigvee}             % big join
\newcommand{\meet}{\wedge}              % meet
\newcommand{\MEET}{\bigwedge}           % big meet
\newcommand{\dcl}{\downarrow}           % down closure
\newcommand{\ucl}{\uparrow}             % up closure
\newcommand{\conv}{\downarrow}          % synt. conv. pred. (postfix)
\newcommand{\diver}{\uparrow}           % diverging term
\newcommand{\Conv}{\Downarrow}          % sem. conv. pred. (postfix)
\newcommand{\SConv}{\Downarrow_{S}}     % sem. conv. pred. (postfix)
\newcommand{\CConv}{\Downarrow_{C}}
\newcommand{\Diver}{\Uparrow}           % diverging map
\newcommand{\cpt}[1]{{\cal K}(#1)}      % compacts, write \cpt{D}
\newcommand{\ret}{\triangleleft}        % retract
\newcommand{\nor}{\succeq}
\newcommand{\prj}{\underline{\ret}}     % projection
\newcommand{\parrow}{\rightharpoonup}   % partial function space
\newcommand{\ub}[1]{{\it UB}(#1)}       % upper bounds
\newcommand{\mub}[1]{{\it MUB}(#1)}     % minimal upper bounds
\newcommand{\lift}[1]{(#1)_{\bot}}      % lifting
\newcommand{\forget}[1]{\underline{#1}} % forgetful translation

%\newcommand{\rel}[1]{\;{\cal #1}\;}     % infix relation (calligraphic)
\newcommand{\rl}[1]{\;{\cal #1}\;}             % infix relation
\newcommand{\rel}[1]{{\cal #1}}         % calligraphic relation with no extra space
\newcommand{\per}[1]{\;#1 \;}
\newcommand{\wddagger}{\natural}  % weak suspension
%\newcommand{\wddagger}{=\!\!\!\!\parallel}  % weak suspension
% Categories

\newcommand{\pair}[2]{\langle #1 , #2 \rangle} % pairing \pair{x}{y}, do not use < >.

%               *******  Notation for the $\pi$-calculus *********
% Syntax:


\newcommand{\fn}[1]{{\it fn}(#1)}                       % free names
\newcommand{\bn}[1]{{\it bn}(#1)}                       % bound names
\newcommand{\names}[1]{{\it n}(#1)}                     % names
\newcommand{\true}{{\sf t}}                             % true
\newcommand{\false}{{\sf f}}                            % false
\newcommand{\pio}{\pi_1}                                % 1 receptor calculus
\newcommand{\pioo}{\pi_{1}^{r}}
\newcommand{\piom}{\pi_{1}^{-}}                         % 1 receptor calculus without match
\newcommand{\pioi}{\pi_{1I}}                    % 1 receptor I-calculus
\newcommand{\pifo}{\pi_{\w{1f}}}                                % functional calculus
\newcommand{\pilo}{\pi_{\w{1l}}}                                % located calculus
\newcommand{\sort}[1]{{\it st}(#1)}                     % sort
\newcommand{\ia}[1]{{\it ia}(#1)}                     % sort
\newcommand{\ite}[3]{{\sf if~} #1 {\sf ~then~} #2 {\sf ~else~} #3}      % if then else
\newcommand{\casep}[2]{{\sf case}^{\times}(#1, \pair{x}{y}\Arrow#2)}      % case on pairs
\newcommand{\casel}[3]{{\sf case}^{L}(#1, #2, \s{cons}(x,y)\Arrow#3)}      % case on lists
\newcommand{\caseb}[3]{{\sf case}^{b}(#1, #2, \s{cons}(x,y)\Arrow#3)}      % case on lists
\newcommand{\nil}{{\sf nil}}
\newcommand{\cons}{{\sf cons}}
\newcommand{\idle}[1]{{\it Idle}(#1)}                   % idle process
\newcommand{\conf}[1]{\{ #1 \}}                         % configuration
\newcommand{\link}[2]{#1 \mapsto #2}                    % link a -> b
\newcommand{\mand}{\mbox{ and }}
\newcommand{\dvec}[1]{\tilde{{\bf #1}}}                 % double vector
\newcommand{\erloc}[1]{{\it er}_{l}(#1)}                % location erasure
\newcommand{\w}[1]{{\it #1}}    % to write in math style
\newcommand{\vcb}[1]{{\bf #1}}
\newcommand{\lc}{\langle\!|}
\newcommand{\rc}{|\!\rangle}
\newcommand{\obj}[1]{{\it obj}(#1)}
\newcommand{\move}[1]{{\sf move}(#1)}
\newcommand{\qqs}[2]{\forall\, #1\;\: #2}
\newcommand{\qtype}[4]{\forall #1 :  #2 . (#4,#3)}
\newcommand{\xst}[2]{\exists\, #1\;\: #2}
\newcommand{\xstu}[2]{\exists\, ! #1\;\: #2}
\newcommand{\dpt}{\,:\,}
\newcommand{\cond}[3]{\mathsf{if}\ #1\ \mathsf{then}\ #2\ \mathsf{else}\ #3}
\newcommand{\s}[1]{{\sf #1}}    % sans-serif
\newcommand{\vc}[1]{{\bf #1}}
\newcommand{\lnorm}{\lbrack\!\lbrack}
\newcommand{\rnorm}{\rbrack\!\rbrack}
\newcommand{\sem}[1]{\underline{#1}}
\newcommand{\tra}[1]{\langle #1 \rangle}
\newcommand{\trb}[1]{[ #1 ]}
\newcommand{\squn}{\mathop{\scriptstyle\sqcup}}
\newcommand{\lcro}{\langle\!|}
\newcommand{\rcro}{|\!\rangle}
\newcommand{\semi}[1]{\lcro #1\rcro}
\newcommand{\sell}{\,\ell\,}
\newcommand{\SDZ}[1]{\marginpar{\textbf{SDZ:} {#1}}}

\newcommand{\when}[3]{{\sf when}~#1~{\sf then}~#2~{\sf else}~#3}
\newcommand{\wthen}[2]{{\sf when}~#1~{\sf then}~#2~}
\newcommand{\welse}[1]{{\sf else}~#1}

% For the double arrow, one must add:
%      \usepackage{mathtools}

\newcommand{\act}[1]{\xrightarrow{#1}} % labelled action low %high

\newcommand{\lact}[1]{\stackrel{#1}{\makebox[5mm]{\,$- \! {\circ}\,$}}}

\newcommand{\set}[1]{\{#1\}}
\newcommand{\pst}[2]{{\sf pset}(#1,#2)}
\newcommand{\st}[2]{{\sf set}(#1,#2)}
\newcommand{\wrt}[2]{{\sf w}(#1,#2)}

\newcommand{\chtype}[2]{{\it Ch_{#1}(#2)}}
\newcommand{\rgtype}[2]{{\it {\sf Reg}_{#1} #2}}

\newcommand{\get}[1]{{\sf get}(#1)}

%\newcommand{\wact}[1]{\xRightarrow{#1}} % weak labelled action low high

%\newcommand{\mact}[1]{\xrightarrow{#1}_{m}} % labelled action low %high

%\newcommand{\wmact}[1]{\xRightarrow{#1}_{m}} % weak labelled action low high

%\newcommand{\act}[1]{\stackrel{#1}{\rightarrow}} % labelled action low %%%high

\newcommand{\acteq}[1]{\stackrel{#1}{\leadsto}} % labelled action low %%%high


%\newcommand{\actI}[1]{\stackrel{#1}{\rightarrow_{1}}} % labelled action low
\newcommand{\actI}[1]{\xrightarrow{#1}_{1}}

%\newcommand{\actII}[1]{\stackrel{#1}{\rightarrow_{2}}} % labelled action low
\newcommand{\actII}[1]{\xrightarrow{#1}_{2}}


\newcommand{\wact}[1]{\stackrel{#1}{\Rightarrow}} % weak labelled action low high
\newcommand{\wactI}[1]{\stackrel{#1}{\Rightarrow_{1}}} % weak labelled action low high
\newcommand{\wactII}[1]{\stackrel{#1}{\Rightarrow_{2}}} % weak labelled action low high


\newcommand{\mact}[1]{\stackrel{#1}{\rightarrow_{m}}} % labelled action low %high
\newcommand{\wmact}[1]{\stackrel{#1}{\Rightarrow_{m}}} % weak labelled action low high

%\newcommand{\lact}[1]{\stackrel{#1}{\leftarrow}}
\newcommand{\lwact}[1]{\stackrel{#1}{\Leftarrow}}



\newcommand{\eval}{\Downarrow}
\newcommand{\Eval}[1]{\Downarrow^{#1}}

\newcommand{\Z}{{\bf Z}}
\newcommand{\Real}{\mathbb{R}^{+}}
\newcommand{\Return}{\ensuremath{\mathtt{return}}\xspace}
\newcommand{\Stop}{\ensuremath{\mathtt{stop}}\xspace}
\newcommand{\Wait}{\ensuremath{\mathtt{wait}}\xspace}
\newcommand{\Read}{\ensuremath{\mathtt{read}}\xspace}
\newcommand{\Write}{\ensuremath{\mathtt{write}}\xspace}
\newcommand{\Yield}{\ensuremath{\mathtt{yield}}\xspace}
\newcommand{\Next}{\ensuremath{\mathtt{next}}\xspace}
\newcommand{\Load}{\ensuremath{\mathtt{load}}\xspace}
\newcommand{\Call}{\ensuremath{\mathtt{call}}\xspace}
\newcommand{\Tcall}{\ensuremath{\mathtt{tcall}}\xspace}
\newcommand{\Pop}{\ensuremath{\mathtt{pop}}\xspace}
\newcommand{\Build}{\ensuremath{\mathtt{build}}\xspace}
\newcommand{\Branch}{\ensuremath{\mathtt{branch}}\xspace}
\newcommand{\Goto}{\ensuremath{\mathtt{goto}}\xspace}

\newcommand{\hatt}[1]{#1^{+}}
\newcommand{\Of}{\mathbin{\w{of}}}

\newcommand{\susp}{\downarrow}
\newcommand{\lsusp}{\Downarrow_L}
\newcommand{\wsusp}{\Downarrow}
\newcommand{\commits}{\searrow}


\newcommand{\spi}{S\pi}


\newcommand{\pres}[2]{#1\triangleright #2} % TCCS else next (alternative)
% \newcommand{\pres}[2]{ \lfloor #1 \rfloor (#2)}  % TCCS else next
\newcommand{\present}[3]{{\sf present} \ #1 \ {\sf do } \ #2 \ {\sf  else} \ #3}


\newcommand{\tick}{{\sf tick}}          % tick action


\newcommand{\sbis}{\equiv_L}
\newcommand{\emit}[2]{\ol{#1}#2}
%\newcommand{\present}[4]{#1(#2).#3,#4}
\newcommand{\match}[4]{[#1=#2]#3,#4}       % pi-equality

\newcommand{\matchv}[4]{[#1 \unrhd #2]#3,#4}

\newcommand{\new}[2]{\nu #1 \ #2}
\newcommand{\outact}[3]{\new{{\bf #1}}{\emit{#2}{#3}}}
\newcommand{\real}{\makebox[5mm]{\,$\|\!-$}} % realizability relation

\newcommand{\regterm}[2]{{\sf reg}_{#1} #2}
\newcommand{\thread}[1]{{\sf thread} \ #1}
\newcommand{\store}[2]{(#1 \leftarrow #2)}
\newcommand{\pstore}[2]{(#1 \Leftarrow #2)}

\newcommand{\regtype}[2]{{\sf Reg}_{#1} #2}
\newcommand{\uregtype}[3]{{\sf Reg}_{#1}(#2, #3)}
\newcommand{\urtype}[2]{{\sf Reg}(#1, #2)}

\newcommand{\upair}[2]{[#1,#2]}
\newcommand{\letb}[3]{\mathsf{let}\;\oc #1 = #2\;\mathsf{in}\;#3}

\newcommand{\vlt}[1]{{\cal V}(#1)}
\newcommand{\prs}[1]{{\cal P}(#1)}

\newcommand{\imp}{{\sf Imp}}            % Imp language
\newcommand{\vm}{{\sf Vm}}              % virtual machine language
\newcommand{\mips}{{\sf Mips}}          % Mips language
\newcommand{\C}{{\sf C}}                % C language
\newcommand{\Clight}{{\sf Clight}}      % Clight language
\newcommand{\Cminor}{{\sf Cminor}}
\newcommand{\RTLAbs}{{\sf RTLAbs}}
\newcommand{\RTL}{{\sf RTL}}
\newcommand{\ERTL}{{\sf ERTL}}
\newcommand{\LTL}{{\sf LTL}}
\newcommand{\LIN}{{\sf LIN}}
\newcommand{\access}[1]{\stackrel{#1}{\leadsto}}
\newcommand{\ocaml}{{\sf ocaml}}
\newcommand{\coq}{{\sf Coq}}
\newcommand{\compcert}{{\sf CompCert}}
%\newcommand{\cerco}{{\sf CerCo}}
\newcommand{\cil}{{\sf CIL}}
\newcommand{\scade}{{\sf Scade}}
\newcommand{\absint}{{\sf AbsInt}}
\newcommand{\framac}{{\sf Frama-C}}
\newcommand{\powerpc}{{\sf PowerPC}}
\newcommand{\lustre}{{\sf Lustre}}
\newcommand{\esterel}{{\sf Esterel}}
\newcommand{\ml}{{\sf ML}}

\newcommand{\codeex}[1]{\texttt{#1}}   % code example

\bibliographystyle{abbrv}

\title{
INFORMATION AND COMMUNICATION TECHNOLOGIES\\
(ICT)\\
PROGRAMME\\
\vspace*{1cm}Project FP7-ICT-2009-C-243881 \cerco{}}

\date{ }
\author{}
%>>>>>> new commands used in this document
% \usepackage[nohyperref,nosvn]{mystyle}

\usepackage{multirow}
\newcolumntype{b}{@{}>{{}}}
\newcolumntype{B}{@{}>{{}}c<{{}}@{}}
\newcolumntype{h}[1]{@{\hspace{#1}}}
\newcolumntype{L}{>{$}l<{$}}
\newcolumntype{C}{>{$}c<{$}}
\newcolumntype{R}{>{$}r<{$}}
\newcolumntype{S}{>{$(}r<{)$}}
\newcolumntype{n}{@{}}
\newcommand{\spanr}[2]{\multicolumn{1}{Rn}{\multirow{#1}{*}{(#2)}}}
\def\nocol{\multicolumn{1}{ncn}{}}

\usepackage[disable, colorinlistoftodos]{todonotes}
\usepackage{enumerate}
\usepackage{tikz}

\newcommand{\tern}[3]{#1\mathrel ? #2 : #3}
\newcommand{\sop}[1]{\s{#1}\ }
\newcommand{\sbin}[1]{\ \s{#1}\ }
\newcommand{\Ell}{\mathcal L}
\newcommand{\alphab}{\boldsymbol\alpha}
\newcommand{\betab}{\boldsymbol\beta}
\newcommand{\gramm}{\mathrel{::=}}
\newcommand{\ass}{\mathrel{:=}}

\renewcommand{\to}[1][]{\stackrel{#1}{\rightarrow}}
%<<<<<<<<<<<<
\begin{document}
% \thispagestyle{empty}
%
% \vspace*{-1cm}
% \begin{center}
% \includegraphics[width=0.6\textwidth]{../style/cerco_logo.png}
% \end{center}
%
% \begin{minipage}{\textwidth}
% \maketitle
% \end{minipage}
%
%
% \vspace*{0.5cm}
% \begin{center}
% \begin{LARGE}
% \bf
% Report \\
% Dependent Cost Labels
% \\
% \end{LARGE}
% \end{center}
%
% \vspace*{2cm}
% \begin{center}
% \begin{large}
% Version 0.1
% \end{large}
% \end{center}
%
% \vspace*{0.5cm}
% \begin{center}
% \begin{large}
% Main Author:\\
% Paolo Tranquilli
% \end{large}
% \end{center}
%
% \vspace*{\fill}
% \noindent
% Project Acronym: \cerco{}\\
% Project full title: Certified Complexity\\
% Proposal/Contract no.: FP7-ICT-2009-C-243881 \cerco{}\\
%
% \clearpage \pagestyle{myheadings} \markright{\cerco{}, FP7-ICT-2009-C-243881}
%
% \newpage

\listoftodos

\section{Introduction}
In~\cite{D2.1}, Amadio \emph{et al.}\ propose an approach for building a compiler for a large fragment of the \textsc{c} programming language.
The novelty of their proposal lies in the fact that the design is capable of lifting execution cost information from the compiled code and presenting it to the user.
This idea is foundational for the CerCo project, which strives to produce a mechanically certified version of such a compiler.

To summarise, Amadio's proposal consisted of `decorations' on the source code, along with the insertion of labels at key points.
These labels are preserved as compilation progresses, from one intermediate language to another.
Once the final object code is produced, such labels should correspond to the parts of the compiled code that have a constant cost.

Two properties must hold of any cost estimate.
The first property, paramount to the correctness of the method, is \emph{soundness}: the actual execution cost must be bounded by the estimate.
In the labelling approach, this is guaranteed if every loop in the control flow of the compiled code passes through at least one cost label.
The second property, optional but desirable, is \emph{preciseness}: the estimate \emph{is} the actual cost.
In the labelling approach, this holds if, for every label, every possible execution of the compiled code starting from that label yields the same cost before reaching another label.
In simple architectures such as the 8051 micro-controller this can be guaranteed by placing labels at the start of every branch in the control flow, and by ensuring that no labels are duplicated.

The reader should note that the above-mentioned requirements must hold when executing the code obtained at the end of the compilation chain.
So even if one is careful about injecting the labels at suitable places in the source code, the requirements might still fail, because of two main obstacles:
\begin{itemize}
\item
The compilation process introduces important changes in the control flow, inserting loops or branches.
An example is the insertion of functions replacing instructions that are unavailable in the target architecture.
This requires loops to be inserted (for example, for multi-word division and generic shift in the 8051 architecture), or effort to be spent in providing unbranching translations of higher-level instructions~\cite{D2.2}.
\item
Even if the compiled code \emph{does}---as far as the syntactic control flow graph is concerned---respect the conditions for soundness and preciseness, the cost of blocks of instructions might not be independent of context, so that different passes through a label might have different costs.
This becomes a concern if one wishes to apply the approach to more complex architectures, for example one with caching or pipelining.
\end{itemize}
The first point unveils a weakness of the current labelling approach when it comes to some common code transformations performed along a compilation chain.
In particular, most \emph{loop optimisations} are disruptive, in the sense outlined in the first bullet point above.
An example optimisation of this kind is \emph{loop peeling}.
This optimisation is employed by compilers in order to trigger other optimisations, for example dead code elimination or invariant code motion.
Here, a first iteration of the loop is hoisted out of the body of the loop, possibly being assigned a different cost than later iterations.

The second bullet point above highlights a difference between existing tools for estimating execution costs and the approach CerCo advocates in~\cite{D2.1}, in favour of those existing tools and to the detriment of Amadio's proposal.
For example, the well known tool \s{aiT}~\cite{absint}---based on abstract interpretation---allows the user to estimate the worst-case execution time (\textsc{wcet}) of a piece of source code, taking into account advanced features of the target architecture.
\s{aiT}'s ability to produce tight estimates of execution costs, even if these costs depend on the context of execution, would enhance the effectiveness of the CerCo compiler, either by integrating such techniques in its development, or by directly calling this tool when ascertaining the cost of blocks of compiled code.
For instance, a typical case where cache analysis yields a difference in the execution cost of a block is in loops: the first iteration will usually incur more cache misses than subsequent iterations.

If one looks closely, the source of the weakness of the labelling approach as presented in~\cite{D2.1} is common to both points: the inability to state different costs for different occurrences of labels, where the difference might originate from labels being duplicated along the compilation, or from the costs being sensitive to the current state of execution.
The preliminary work we present here addresses this weakness by introducing cost labels whose cost depends on which iterations of their containing loops they occur in.
This is achieved by means of \emph{indexed labels}; all cost labels are decorated with formal indices coming from the loops containing such labels.
These indices allow us to reconstruct, even after multiple loop transformations, to which iterations of the original loops in the source code a particular label occurrence belongs.
During the annotation stage of the source code, this information is presented to the user by means of \emph{dependent costs}.

We concentrate on integrating the labelling approach with two loop transformations.
For general information on compiler optimisations (and loop optimisations in particular) we refer the reader to the vast literature on the subject (e.g.~\cite{muchnick,morgan}).

\paragraph{Loop peeling}
As already mentioned, loop peeling consists in preceding the loop with a copy of its body, appropriately guarded.
This is used, in general, to trigger further optimisations, such as those that rely on execution information which can be computed at compile time but which is erased by further iterations of the loop, or those that use the hoisted code to be more effective at eliminating redundant code.
Integrating this transformation into the labelling approach would also allow the integration of the common case of cache analysis explained above; the analysis of cache hits and misses usually benefits from a form of \emph{virtual} loop peeling~\cite{cacheprediction}.
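As a concrete illustration, consider the following \textsc{c} fragment (a hypothetical example of ours, purely for exposition):
\begin{verbatim}
/* before peeling */
while (i < n) { sum += a[i]; i++; }

/* after peeling: the first iteration is hoisted out, guarded */
if (i < n) {
  sum += a[i]; i++;                      /* peeled first iteration */
  while (i < n) { sum += a[i]; i++; }    /* remaining iterations   */
}
\end{verbatim}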
678
\paragraph{Loop unrolling}
This optimisation consists of repeating several copies of the body of the loop inside the loop itself (inserting appropriate guards, or avoiding them altogether if enough information about the loop's guard is available at compile time).
This can limit the number of (conditional or unconditional) jumps executed by the code and trigger further optimisations dealing with pipelining, if appropriate for the architecture.
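Again as a hypothetical \textsc{c} illustration of ours (unrolling by a factor of two, keeping the guards):
\begin{verbatim}
/* before unrolling */
while (i < n) { sum += a[i]; i++; }

/* after unrolling by 2: a guarded second copy inside the loop */
while (i < n) {
  sum += a[i]; i++;
  if (i < n) { sum += a[i]; i++; }
}
\end{verbatim}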

Whilst we cover only these two loop optimisations in this report, we argue that the work presented herein provides a good foundation for extending the labelling approach to cover more of the common optimisations, as well as for gaining insight into how to integrate advanced cost estimation techniques, such as cache analysis, into the CerCo compiler.
Moreover, loop peeling itself has the fortuitous property of enhancing and enabling other optimisations.
Experimentation with CerCo's untrusted prototype compiler, which implements constant propagation and partial redundancy elimination~\cite{PRE,muchnick}, shows how loop peeling enhances those other optimisations.
686
\paragraph{Outline}
We will present our approach on a minimal `toy' imperative language, \imp{} with \s{goto}s, which we present in Section~\ref{sec:defimp} along with formal definitions of the loop transformations.
This language already presents most of the difficulties encountered when dealing with \textsc{c}, so we stick to it for the sake of this presentation.
In Section~\ref{sec:labelling} we summarise the labelling approach as presented in~\cite{D2.1}.
Section~\ref{sec:indexedlabels} presents \emph{indexed labels}, our proposal for dependent labels which are able to describe precise costs even in the presence of the various loop transformations we consider.
Finally Section~\ref{sec:conc} goes into more detail regarding the implementation of indexed labels in CerCo's untrusted compiler and speculates on further work on the subject.

\section{\imp{} with goto}\label{sec:defimp}
We briefly outline the toy language, \imp{} with \s{goto}s.
The language was designed in order to pose problems for the existing labelling approach, and as a testing ground for our new notion of indexed labels.

The syntax and operational semantics of our toy language are presented in Figure~\ref{fig:minimp}.
Note that we may augment the language further, with \s{break} and \s{continue}, at no further expense.
\begin{figureplr}
$$\begin{gathered}
\begin{array}{nlBl>(R<)n}
\multicolumn{4}{C}{\bfseries Syntax}\\
\multicolumn{4}{ncn}{
  \ell,\ldots \hfill \text{(labels)} \hfill x,y,\ldots \hfill
\text{(identifiers)}
\hfill e,f,\ldots \hfill \text{(expressions)}
}\\
P,S,T,\ldots &\gramm& \s{skip} \mid s;t
\mid \sop{if}e\sbin{then}s\sbin{else}t
\mid \sop{while} e \sbin{do} s \mid
  x \ass e
\\&\mid&
\ell : s \mid \sop{goto}\ell& \spanr{-2}{statements}\\
\\
\multicolumn{4}{C}{\bfseries Semantics}\\
K,\ldots  &\gramm& \s{halt} \mid S \cdot K & continuations
\end{array}
\\[15pt]
\s{find}(\ell,S,K) \ass
\left\{\begin{array}{lL}
\bot & if $S=\s{skip},\sop{goto} \ell'$ or $x\ass e$,\\
(T, K) & if $S=\ell:T$,\\
\s{find}(\ell,T,K) & otherwise, if $S = \ell':T$,\\
\s{find}(\ell,T_1,T_2\cdot K) & if defined and $S=T_1;T_2$,\\
\s{find}(\ell,T_1,K) & if defined and
$S=\sop{if}b\sbin{then}T_1\sbin{else}T_2$,\\
\s{find}(\ell,T_2,K) & otherwise, if $S=T_1;T_2$ or
$\sop{if}b\sbin{then}T_1\sbin{else}T_2$,\\
\s{find}(\ell,T,S\cdot K) & if $S = \sop{while}b\sbin{do}T$.
\end{array}\right.
\\[15pt]
\begin{array}{lBl}
(x:=e,K,s)  &\to_P& (\s{skip},K,s[v/x]) \qquad\mbox{if }(e,s)\eval v \\ \\
(S;T,K,s)  &\to_P& (S,T\cdot K,s) \\ \\
(\s{if} \ b \ \s{then} \ S \ \s{else} \ T,K,s)
&\to_P&\left\{
\begin{array}{ll}
(S,K,s) &\mbox{if }(b,s)\eval v \neq 0 \\
(T,K,s) &\mbox{if }(b,s)\eval 0
\end{array}
\right. \\ \\
(\s{while} \ b \ \s{do} \ S ,K,s)
&\to_P&\left\{
\begin{array}{ll}
(S,\s{while} \ b \ \s{do} \ S \cdot K,s) &\mbox{if }(b,s)\eval v \neq 0 \\
(\s{skip},K,s) &\mbox{if }(b,s)\eval 0
\end{array}
\right. \\ \\
(\s{skip},S\cdot K,s)  &\to_P&(S,K,s) \\ \\
(\ell : S, K, s)  &\to_P& (S,K,s) \\ \\
(\sop{goto}\ell,K,s)  &\to_P& (\s{find}(\ell,P,\s{halt}),s)
\end{array}
\end{gathered}$$
\caption{The syntax and operational semantics of \imp.}
\label{fig:minimp}
\end{figureplr}
The precise grammar for expressions is not particularly relevant, so we do not give it in full.
For the sake of conciseness we also treat boolean and arithmetic expressions together (with the usual \textsc{c} convention of an expression being true iff non-zero).
We may omit the \s{else} clause of a conditional if it leads to a \s{skip} statement.

We will presuppose that all programs are \emph{well-labelled}, i.e.\ every label labels at most one occurrence of a statement in a program, and every \s{goto} points to a label actually present in the program.
The \s{find} helper function has the task of not only finding the labelled statement in the program, but also building the correct continuation.
The continuation built by \s{find} replaces the current continuation in the case of a jump.
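For example, for the program $P = (\ell : x\ass 1); y \ass 2$ we have $\s{find}(\ell, P, \s{halt}) = (x \ass 1, (y\ass 2)\cdot \s{halt})$: the statement following the labelled one ends up in the continuation, so that execution resumes correctly after the jump.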
773
\paragraph{Further down the compilation chain}
We abstract over the rest of the compilation chain.
For every language $L$ further down the compilation chain, we posit the existence of a suitable notion of `sequential instructions', wherein each instruction has a single natural successor, to which we can add our own.

\subsection{Loop transformations}
We call a loop $L$ \emph{single-entry} in $P$ if there is no \s{goto} in $P$ outside of $L$ which jumps into $L$.\footnote{This is a reasonable approximation: it defines a loop as multi-entry if it has an external but unreachable \s{goto} jumping into it.}
Many loop optimisations do not preserve the semantics of multi-entry loops in general, or are otherwise rendered ineffective.
Usually compilers implement single-entry loop detection which prevents multi-entry loops from being targeted by optimisations~\cite{muchnick,morgan}.
The loop transformations we present are local, i.e.\ they target a single loop and transform it.
Which loops are targeted may be decided by some \emph{ad hoc} heuristic.
However, the precise details of which loops are targeted, and how, are not important here.
785
\paragraph{Loop peeling}
$$
\sop{while}b\sbin{do}S \mapsto \sop{if}b\sbin{then} S; \sop{while} b \sbin{do} S[\ell'_i/\ell_i]
$$
where $\ell'_i$ is a fresh label for any $\ell_i$ labelling a statement in $S$.
This relabelling is safe for \s{goto}s occurring outside the loop because of the single-entry condition.
Note that any \s{break} and \s{continue} statements in the peeled body $S$ should be replaced with \s{goto}s.

\paragraph{Loop unrolling}
$$
\sop{while}b\sbin{do}S\mapsto
\sop{while} b \sbin{do} (S ;
  \sop{if} b \sbin{then} (S[\ell^1_i/\ell_i] ;
  \cdots
  \sop{if} b \sbin{then} S[\ell^n_i/\ell_i]) \cdots)
$$
where $\ell^j_i$ are again fresh labels for any $\ell_i$ labelling a statement in $S$.
This is a willfully na\"{i}ve version of loop unrolling, which usually targets less general loops.
The problems this transformation poses to CerCo's labelling approach are independent of the sophistication of the actual transformation.
805
\section{Labelling: a quick sketch of the previous approach}
\label{sec:labelling}
Plainly labelled \imp{} is obtained by adding \emph{cost labels} (with metavariables $\alpha,\beta,\ldots$) to the code, together with cost-labelled statements:
$$
S,T\gramm \cdots \mid \alpha: S
$$
Cost labels allow us to track some program points along the compilation chain.
For further details we refer to~\cite{D2.1}.

With labels, the small-step semantics turns into a labelled transition system, and a natural notion of trace (i.e.\ a list of labels) arises.
The evaluation of statements is enriched with traces, so that rules follow a pattern similar to the following:
$$
\begin{array}{lblL}
(\alpha: S, K,s) &\to[\alpha]_P (S,K,s)\\
(\s{skip}, S \cdot K,s) &\to[\varepsilon]_P (S, K, s)\\
& \text{etc.}
\end{array}$$
Here, we identify cost labels $\alpha$ with singleton traces and we use $\varepsilon$ for the empty trace.
Cost labels are emitted by cost-labelled statements only\footnote{In the general case the evaluation of expressions can emit cost labels too (see Section~\ref{sec:conc}).}.
We then write $\to[\lambda]\!\!^*$ for the transitive closure of the small-step semantics, which produces the trace $\lambda$ by concatenating the labels of the single steps.
826
\paragraph{Labelling}
Given an \imp{} program $P$, its \emph{labelling} $\alpha:\Ell(P)$ in $\ell\imp$ is defined by putting cost labels after every branching statement, at the start of both branches, and a cost label at the beginning of the program.
The relevant cases are
$$\begin{aligned}
  \Ell(\sop{if}e\sbin{then}S\sbin{else}T) &=
    \sop{if}e\sbin{then}\alpha:\Ell(S)\sbin{else}\beta:\Ell(T)\\
  \Ell(\sop{while}e\sbin{do}S) &=
    (\sop{while}e\sbin{do}\alpha:\Ell(S));\beta:\s{skip}
  \end{aligned}$$
where $\alpha,\beta$ are fresh cost labels.
In all other cases the definition just passes to substatements.
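For example, labelling a simple loop gives
$$\Ell(\sop{while}e\sbin{do}x\ass x+1) = (\sop{while}e\sbin{do}\alpha: x\ass x+1);\beta:\s{skip}$$
so that $\alpha$ is emitted once per iteration of the loop, while $\beta$ is emitted once when the loop exits.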
838
\paragraph{Labels in the rest of the compilation chain}
All languages further down the chain get a new sequential statement $\sop{emit}\alpha$ whose effect is to be consumed in a labelled transition while keeping the same state.
All other instructions guard their operational semantics and do not emit cost labels.

Preservation of semantics throughout the compilation process is restated, in rough terms, as:
$$
\text{starting state of $P$}\to[\lambda]\!\!^*\;\text{halting state} \iff
\text{starting state of $\mathcal C(P)$} \to[\lambda]\!\!^*\;\text{halting state}
$$
Here $P$ is a program of a language along the compilation chain, starting and halting states depend on the language, and $\mathcal C$ is the compilation function\footnote{The case of divergent computations needs to be addressed too.
Also, the requirement can be weakened by demanding some weaker form of equivalence of the traces than equality.
Both of these issues are beyond the scope of this presentation.}.
851
\paragraph{Instrumentations}
Let $\mathcal C$ be the whole compilation from $\ell\imp$ to the labelled version of some low-level language $L$.
Supposing such compilation has not introduced any new loop or branching, we have that:
\begin{itemize}
\item
Every loop contains at least one cost label (\emph{soundness condition}).
\item
Every branching has different labels for the two branches (\emph{preciseness condition}).
\end{itemize}
With these two conditions, we have that every cost label in $\mathcal C(P)$ for any $P$ corresponds to a block of sequential instructions, to which we can assign a constant \emph{cost}\footnote{This in fact requires the machine architecture to be `simple enough', or for some form of execution analysis to take place.}.
We may therefore assume the existence of a \emph{cost mapping} $\kappa_P$ from cost labels to natural numbers, assigning to each cost label $\alpha$ the cost of the block containing the single occurrence of $\alpha$.

Given any cost mapping $\kappa$, we can enrich a labelled program so that a particular fresh variable (the \emph{cost variable} $c$) keeps track of the summation of costs during execution.
We call this procedure \emph{instrumentation} of the program, and it is defined recursively by:
$$
\mathcal I(\alpha:S) = c \ass c + \kappa(\alpha) ; \mathcal I(S)
$$
In all other cases the definition passes to substatements.
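
Concretely, at the source level the instrumentation amounts to updates of a cost variable; the following hypothetical \textsc{c} sketch (the name \codeex{\_\_cost} and the numeric costs are ours, purely illustrative) shows the idea:
\begin{verbatim}
unsigned long __cost = 0;  /* the cost variable c              */

void f(int n) {
  __cost += 10;            /* cost of the initial block        */
  while (n > 0) {
    __cost += 15;          /* kappa(alpha): one loop iteration */
    n--;
  }
  __cost += 4;             /* kappa(beta): the loop exit block */
}
\end{verbatim}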
870
\paragraph{The problem with loop optimisations}
Let us take loop peeling, and apply it to the labelling of a program without any prior adjustment:
$$
(\sop{while}b\sbin{do}\alpha:S);\beta:\s{skip}
\mapsto
(\sop{if}b\sbin{then} \alpha : S; \sop{while} b \sbin{do} \alpha :
S[\ell'_i/\ell_i]);
\beta:\s{skip}
$$
What happens is that the cost label $\alpha$ is duplicated with two distinct occurrences.
If these two occurrences correspond to different costs in the compiled code, the best the cost mapping can do is to take the maximum of the two, preserving soundness (i.e.\ the cost estimate still bounds the actual one) but losing preciseness (i.e.\ the actual cost would be strictly less than its estimate).
For example, if the peeled occurrence of $\alpha$ costs $23$ cycles in the compiled code while the occurrence inside the loop costs $15$, the mapping must set $\kappa(\alpha)=23$, overestimating every iteration but the first.
882
\section{Indexed labels}
\label{sec:indexedlabels}
This section presents the core of the new approach.
In brief, it amounts to the following:
\begin{enumerate}[\bfseries~\ref*{sec:indexedlabels}.1.]
\item
\label{en:outline1}
Enrich cost labels with formal indices corresponding, at the beginning of the process, to which iteration of the loop they belong to.
\item
\label{en:outline2}
Each time a loop transformation is applied and a cost label is split into different occurrences, each of these is reindexed so that whenever it is emitted its position in the original loop can be reconstructed.
\item
\label{en:outline3}
Along the compilation chain, alongside the \s{emit} instruction we add other instructions updating the indices, so that iterations of the original loops can be rebuilt at the level of the operational semantics.
\item
\label{en:outline4}
The machinery computing the cost mapping will still work, but will assign costs to indexed cost labels, rather than to plain cost labels as we would like.
However, \emph{dependent costs} can be calculated, where the dependency is on which iteration of the containing loops we are in.
\end{enumerate}
902
\subsection{Indexing the cost labels}
\label{ssec:indlabs}

\paragraph{Formal indices and $\iota\ell\imp$}
Let $i_0,i_1,\ldots$ be a sequence of distinguished fresh identifiers that will be used as loop indices.
A \emph{simple expression} is an affine arithmetical expression in one of these indices, that is $a*i_k+b$ with $a,b,k \in \mathbb N$.
Simple expressions $e_1=a_1*i_k+b_1$ and $e_2=a_2*i_k+b_2$ in the same index can be composed, yielding $e_1\circ e_2\ass (a_1a_2)*i_k + (a_1b_2+b_1)$, and this operation has an identity element in $id_k \ass 1*i_k+0$.
Constants can be expressed as simple expressions, so that we identify a natural $c$ with $0*i_k+c$.
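For example, $(2*i_k+1)\circ(3*i_k+2) = 6*i_k+5$: substituting $3*i_k+2$ for $i_k$ in $2*i_k+1$ indeed gives $2*(3*i_k+2)+1 = 6*i_k+5$.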
911
An \emph{indexing} (with metavariables $I$, $J$, \ldots) is a list of transformations of successive formal indices dictated by simple expressions, that is a mapping\footnote{Here we restrict each mapping to be a simple expression \emph{on the same index}.
This might not be the case if more loop optimisations are accounted for (for example, interchanging two nested loops).}
$$
i_0\mapsto a_0*i_0+b_0,\dots, i_{k-1} \mapsto a_{k-1}*i_{k-1}+b_{k-1}
$$

An \emph{indexed cost label} (metavariables $\alphab$, $\betab$, \ldots) is the combination of a cost label $\alpha$ and an indexing $I$, written $\alpha\la I\ra$.
The cost label underlying an indexed one is called its \emph{atom}.
All plain labels can be considered as indexed ones by taking an empty indexing.
921
\imp{} with indexed labels ($\iota\ell\imp$) is defined by adding to $\imp$ statements with indexed labels, and by having loops with formal indices attached to them:
$$
S,T,\ldots \gramm \cdots \mid i_k:\sop{while}e\sbin{do}S\mid \alphab : S
$$
Note that unindexed loops still exist in the language: they correspond to multi-entry loops, which are ignored by indexing and optimisations.
We will discuss the semantics later.
928
\paragraph{Indexed labelling}
Given an $\imp$ program $P$, in order to index loops and assign indexed labels we must first distinguish single-entry loops.
We now sketch how this can be computed.

A first pass over the program $P$ can easily compute two maps: $\s{loopof}_P$, from each label $\ell$ to the occurrence (i.e.\ the path) of a $\s{while}$ loop containing $\ell$, or the empty path if none exists; and $\s{gotosof}_P$, from a label $\ell$ to the occurrences of \s{goto}s pointing to it.
Then the set $\s{multientry}_P$ of multi-entry loops of $P$ can be computed by
$$
\s{multientry}_P\ass\{\, p \mid \exists \ell,q.\ p =\s{loopof}_P(\ell),\ q\in\s{gotosof}_P(\ell),\ q \not\le p\,\}
$$
Here $\le$ is the prefix relation\footnote{Possible simplifications to this procedure include keeping track of just the while loops containing labels and \s{goto}s (rather than paths in the syntactic tree of the program), and making two passes while avoiding building the map to sets $\s{gotosof}$.}.
939
Let $Id_k$ be the indexing of length $k$ made from identity simple expressions, i.e.\ the sequence $i_0\mapsto id_0, \ldots , i_{k-1}\mapsto id_{k-1}$.
We define the tiered indexed labelling $\Ell^\iota_P (S,k)$ in program $P$, for an occurrence $S$ of a statement in $P$ and a natural $k$, by recursion, setting:
$$
\Ell^\iota_P(S,k)\ass
\left\{
\begin{array}{lh{-100pt}l}
 (i_k:\sop{while}b\sbin{do}\alpha\la Id_{k+1}\ra : \Ell^\iota_P(T,k+1));\beta\la Id_k \ra : \s{skip}
\\& \text{if $S=\sop{while}b\sbin{do}T$ and $S\notin \s{multientry}_P$,}\\[3pt]
(\sop{while}b\sbin{do}\alpha\la Id_k \ra : \Ell^\iota_P(T,k));\beta\la Id_k \ra : \s{skip}
\\& \text{otherwise, if $S=\sop{while}b\sbin{do}T$,}\\[3pt]
\sop{if}b\sbin{then} \alpha\la Id_k \ra : \Ell^\iota_P(T_1,k) \sbin{else} \beta\la Id_k \ra : \Ell^\iota_P(T_2,k)
\\&\text{if $S=\sop{if}b\sbin{then}T_1\sbin{else}T_2$,}\\[3pt]
\ldots
\end{array}
\right.
$$
Here, as usual, $\alpha$ and $\beta$ are fresh cost labels, and the other cases just make the recursive calls on the substatements.
The \emph{indexed labelling} of a program $P$ is then defined as $\alpha\la \ra : \Ell^\iota_P(P,0)$, i.e.\ a further fresh unindexed cost label is added at the start, and we start from level $0$.

In plainer words: each single-entry loop is indexed by $i_k$, where $k$ is the number of other single-entry loops containing this one, and all cost labels under the scope of a single-entry loop indexed by $i_k$ are indexed by all indices $i_0,\ldots,i_k$, without any transformation.
960
\subsection{Indexed labels and loop transformations}
We define the \emph{reindexing} $I \circ (i_k\mapsto a*i_k+b)$ as an operator on indexings by setting:
\begin{multline*}
(i_0\mapsto e_0,\ldots, i_k \mapsto e_k,\ldots,i_n\mapsto e_n)
\circ(i_k\mapsto a*i_k+b)
\ass\\
i_0\mapsto e_0,\ldots, i_k \mapsto e_k \circ(a*i_k+b),\ldots,i_n\mapsto e_n.
\end{multline*}
We further extend this to indexed labels (by $\alpha\la I\ra\circ(i_k\mapsto e)\ass \alpha\la I\circ (i_k\mapsto e)\ra$) and also to statements in $\iota\ell\imp$ (by applying the above transformation to all indexed labels).

We can then redefine loop peeling and loop unrolling, taking into account indexed labels.
It will only be possible to apply a transformation to an indexed loop, that is, a loop that is single-entry.
The attentive reader will notice that no assumptions are made on the labelling of the statements that are involved.
In particular the transformations can be repeated and composed at will.
Also, note that after erasing all labelling information (i.e.\ indexed cost labels and loop indices) we recover exactly the same transformations presented in Section~\ref{sec:defimp}.
976
\paragraph{Indexed loop peeling}
$$
i_k:\sop{while}b\sbin{do}S\mapsto
\sop{if}b\sbin{then} S\circ (i_k\mapsto 0); i_k : \sop{while} b \sbin{do} S[\ell'_i/\ell_i]\circ(i_k\mapsto i_k + 1)
$$
As can be expected, the peeled iteration of the loop gets reindexed, always being the first iteration of the loop, while the iterations of the remaining loop are shifted by $1$.
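For example, peeling twice reindexes the second peeled copy by $(i_k\mapsto i_k+1)\circ(i_k\mapsto 0)$, i.e.\ by the constant $1$, and the residual loop by $(i_k\mapsto i_k+1)\circ(i_k\mapsto i_k+1) = (i_k\mapsto i_k+2)$, as expected.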
983
\paragraph{Indexed loop unrolling}
$$
\begin{array}{l}
\begin{array}{ncn}
i_k:\sop{while}b\sbin{do}S\\
\tikz\node[rotate=-90,inner sep=0pt]{$\mapsto$};
\end{array}\\
i_k:\sop{while} b \sbin{do}\\
\quad (S\circ(i_k\mapsto n*i_k) ;\\
\quad \sop{if} b \sbin{then}\\
\quad\quad (S[\ell^1_i/\ell_i]\circ(i_k\mapsto n*i_k+1) ;\\
\quad\quad\quad \vdots \\
\quad\quad \quad \sop{if} b \sbin{then}\\
\quad \quad \quad \quad S[\ell^n_i/\ell_i]\circ(i_k\mapsto n*i_k+n-1)
)\cdots )
\end{array}
$$
Again, the reindexing is as expected: each copy of the unrolled body has its indices remapped so that when they are executed, the original iteration of the loop to which they correspond can be recovered.
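For instance, with $n=2$ the first copy of the body is reindexed by $i_k\mapsto 2*i_k$ and the second by $i_k\mapsto 2*i_k+1$, so that iteration $j$ of the unrolled loop accounts for iterations $2j$ and $2j+1$ of the original one.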
1002
\subsection{Semantics and compilation of indexed labels}
In order to make sense of loop indices, one must keep track of their values in the state.
A \emph{constant indexing} (metavariables $C,\ldots$) is an indexing which employs only constant simple expressions.
The evaluation of an indexing $I$ in a constant indexing $C$, noted $I|_C$, is defined by:
$$
I|_{(i_0\mapsto c_0,\ldots, i_{k-1}\mapsto c_{k-1})} \ass I\circ(i_0\mapsto c_0)\circ\cdots\circ(i_{k-1}\mapsto c_{k-1})
$$
Here, we are using the definition of ${-}\circ{-}$ given in Section~\ref{ssec:indlabs}.
We consider the above defined only if the resulting indexing is a constant one too\footnote{For example $(i_0\mapsto 2*i_0,i_1\mapsto i_1+1)|_{i_0\mapsto 2}$ is undefined, but $(i_0\mapsto 2*i_0,i_1\mapsto 0)|_{i_0\mapsto 2}= i_0\mapsto 4,i_1\mapsto 0$ is indeed a constant indexing, even if the domain of the original indexing is not covered by the constant one.}.
The definition is extended to indexed labels by $\alpha\la I\ra|_C\ass \alpha\la I|_C\ra$.

Constant indexings will be used to keep track of the exact iterations of the original code that the emitted labels belong to.
We thus define two basic actions to update constant indexings: $C[i_k{\uparrow}]$ increments the value of $i_k$ by one, and $C[i_k{\downarrow}0]$ resets it to $0$.
1016
We are ready to update the definition of the operational semantics of indexed labelled \imp.
The emitted cost labels will now be ones indexed by constant indexings.
We add a special indexed loop construct for continuations that keeps track of the indices of active indexed loops:
$$
K,\ldots  \gramm \cdots \mid i_k:\sop{while} b \sbin {do} S \sbin{then}  K
$$
The difference between the regular stack concatenation $i_k:\sop{while}b\sbin{do}S\cdot K$ and the new constructor is that the latter indicates a loop that is active, i.e.\ one in which we already are, while the former is a loop that still needs to be started\footnote{In the presence of \s{continue} and \s{break} statements active loops need to be kept track of in any case.}.
The \s{find} function is updated accordingly with the case
$$
\s{find}(\ell, i_k:\sop{while}b\sbin{do}S, K) \ass \s{find}(\ell, S, i_k: \sop{while}b\sbin{do}S\sbin{then}K)
$$
The state will now be a 4-tuple $(S,K,s,C)$ which adds a constant indexing to the triple of the regular semantics.
The small-step rules for all statements remain the same, without touching the $C$ parameter (in particular unindexed loops behave as usual), apart from those regarding cost labels and indexed loops.
The remaining cases are:
$$\begin{aligned}
   (\alphab : S,K,s,C) &\to[\alphab|_C]_P (S,K,s,C)\\
   (i_k:\sop{while}b\sbin{do}S,K,s,C) &\to[\varepsilon]_P
    \begin{cases}
     (S,i_k:\sop{while}b\sbin{do}S\sbin{then} K,s,C[i_k{\downarrow}0])
     \\\hskip 125pt \text{if $(b,s)\eval v\neq 0$,}\\
     \rlap{(\s{skip}, K, s, C)}\hskip 125pt \text{otherwise}
    \end{cases}\\
   (\s{skip}, i_k:\sop{while}b\sbin{do}S\sbin{then}K,s,C) &\to[\varepsilon]_P
    \begin{cases}
     (S,i_k:\sop{while}b\sbin{do}S\sbin{then} K,s,C[i_k{\uparrow}])
      \\\hskip 125pt \text{if $(b,s)\eval v\neq 0$,}\\
     \rlap{(\s{skip}, K, s, C)} \hskip 125pt \text{otherwise}
    \end{cases}
  \end{aligned}$$
Some explanations are in order:
\begin{itemize}
\item
Emitting a label always instantiates it with the current indexing.
\item
Hitting an indexed loop for the first time initializes the corresponding index to $0$; continuing the same loop increments the index, as expected.
\item
The \s{find} function ignores the current indexing: this is correct under the assumption that all indexed loops are single-entry, so that when we land inside an indexed loop with a \s{goto}, we can be sure that its current index is right.
\item
The starting state with store $s$ for a program $P$ is $(P,\s{halt},s,(i_0\mapsto 0,\dots,i_{n-1}\mapsto 0))$, where $i_0,\ldots,i_{n-1}$ cover all loop indices of $P$\footnote{For a program which is the indexed labelling of an \imp{} one this corresponds to the maximum nesting of single-entry loops.
We can also avoid computing this value in advance if we define $C[i_k{\downarrow}0]$ to extend $C$'s domain as needed, so that the starting constant indexing can be the empty one.}.
\end{itemize}
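
As an example of the traces produced: if $P$ is the indexed labelling of a program consisting of a single single-entry loop that runs for exactly two iterations, the emitted trace is $\alpha'\la\ra\,\alpha\la i_0\mapsto 0\ra\,\alpha\la i_0\mapsto 1\ra\,\beta\la\ra$, where $\alpha'$ is the initial label of the program, $\alpha$ the label of the loop body and $\beta$ the label after the loop.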
1058
\paragraph{Compilation}
Further down the compilation chain the loop structure is usually partially or completely lost.
We cannot rely on it any more to keep track of the original source code iterations.
We therefore add, alongside the \s{emit} instruction, two other sequential instructions $\sop{ind\_reset}k$ and $\sop{ind\_inc}k$ whose sole effect is to reset to $0$ (resp.\ increment by $1$) the loop index $i_k$, as kept track of in a constant indexing accompanying the state.

The first step of compilation from $\iota\ell\imp$ consists of prefixing the translation of an indexed loop $i_k:\s{while}\ b\ \s{do}\ S$ with $\sop{ind\_reset}k$ and postfixing the translation of its body $S$ with $\sop{ind\_inc}k$.
Later in the compilation chain we must propagate the instructions dealing with cost labels.
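
Rendered as a hypothetical \textsc{c}-like sketch of the compiled output (the names \codeex{\_\_i\_k} and \codeex{\_\_emit\_alpha} are ours, standing for the index-tracking state and the label emission of the labelled semantics, not for actual generated code):
\begin{verbatim}
extern void __emit_alpha(unsigned long);  /* emit alpha<i_k -> c>   */
static unsigned long __i_k;               /* the index i_k inside C */

void g(int b) {
  __i_k = 0;                 /* ind_reset k: entering the loop      */
  while (b > 0) {
    __emit_alpha(__i_k);     /* emit, instantiated by current i_k   */
    b--;                     /* the loop body                       */
    __i_k++;                 /* ind_inc k: one more iteration       */
  }
}
\end{verbatim}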
1066
We would like to stress the fact that this machinery is only needed to give a suitable semantics of observables, on which preservation proofs can be carried out.
By no means are the added instructions and the constant indexing in the state meant to change the actual (let us say denotational) semantics of the programs.
In this regard the two new instructions have a similar role to the \s{emit} one.
A forgetful mapping of everything (syntax, states, operational semantics rules) can be defined by erasing all occurrences of cost labels and loop indices, and the result will always be a regular version of the language considered.
1071
1072\paragraph{Stating the preservation of semantics}
1073In fact, the statement of preservation of semantics does not change at all, if not for considering traces of evaluated indexed cost labels rather than traces of plain ones.

\subsection{Dependent costs in the source code}
\label{ssec:depcosts}
The task of producing dependent costs from the constant costs induced by indexed labels is quite technical.
Before presenting it, we would like to point out that the annotations produced by the procedure described in this subsection, even when correct, can be enormous and unreadable.
In Section~\ref{sec:conc}, where we detail the actual implementation, we will also sketch how we mitigated this problem.

Given the result of compiling the indexed labelling $\Ell^\iota(P)$ of an \imp{} program $P$, we may still suppose that a cost mapping can be computed, but now from indexed labels to naturals.
We want to annotate the source code, so we need a way to express and compute the costs of cost labels, i.e.\ to group the costs of indexed labels into the costs of their atoms.
In order to do so we introduce \emph{dependent costs}.
Let us suppose that, for the sole purpose of annotation, we have available in the language ternary expressions of the form
$$\tern e {f_1}{f_2},$$
and that we have access to common operators on integers such as equality, order and modulus.

\paragraph{Simple conditions}

First, we need to shift from \emph{transformations} of loop indices to \emph{conditions} on them.
We identify a set of conditions on natural numbers which are able to express the image of any composition of simple expressions.
\emph{Simple conditions} are of three possible forms:
\begin{itemize}
\item
Equality $i_k=n$ for some natural $n$.
\item
Inequality $i_k\ge n$ for some natural $n$.
\item
Modular equality together with inequality, $i_k\bmod a = b\wedge i_k\ge n$, for naturals $a, b, n$.
\end{itemize}
The `always true' simple condition is given by $i_k\ge 0$.
We write $i_k\bmod a = b$ as shorthand for the simple condition $i_k\bmod a = b\wedge i_k\ge 0$.

Given a simple condition $p$ and a constant indexing $C$ we can easily define when $p$ holds for $C$ (written $p\circ C$).
A \emph{dependent cost expression} is an expression built solely out of integer constants and ternary expressions with simple conditions at their head.
Given a dependent cost expression $e$ where all of the loop indices appearing in it are in the domain of a constant indexing $C$, we can define the value $e\circ C\in \mathbb N$ by:
$$n\circ C\ass n,\qquad (\tern p e f)\circ C\ass
\begin{cases}
  e\circ C& \text{if $p\circ C$,}\\
  f\circ C& \text{otherwise.}
\end{cases}$$
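
In OCaml, simple conditions and dependent cost expressions, together with the evaluations $p\circ C$ and $e\circ C$ just defined, may be sketched as follows (a constant indexing is here just an association list; all names are illustrative):
\begin{verbatim}
type simple_cond =
  | Eq of int * int                 (* i_k = n *)
  | Geq of int * int                (* i_k >= n *)
  | ModEq of int * int * int * int  (* i_k mod a = b  /\  i_k >= n *)

type dep_cost =
  | Const of int
  | Tern of simple_cond * dep_cost * dep_cost   (* p ? e : f *)

(* p o C: does the simple condition hold under the indexing C? *)
let holds (p : simple_cond) (c : (int * int) list) : bool =
  match p with
  | Eq (k, n) -> List.assoc k c = n
  | Geq (k, n) -> List.assoc k c >= n
  | ModEq (k, a, b, n) -> let i = List.assoc k c in i mod a = b && i >= n

(* e o C: the natural number a dependent cost evaluates to under C. *)
let rec value (e : dep_cost) (c : (int * int) list) : int =
  match e with
  | Const n -> n
  | Tern (p, e1, e2) -> if holds p c then value e1 c else value e2 c
\end{verbatim}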

\paragraph{From indexed costs to dependent ones}
Every simple expression $e$ corresponds to a simple condition $p(e)$ which expresses the set of values that $e$ can take.
The definition of this correspondence follows.
We recall that in this development, loop indices are always mapped to simple expressions over the same index.
If this were not the case, the condition obtained from an expression would have to be on the mapped index, not on the indeterminate of the simple expression.
We leave all generalisations of what we present here for further work:
$$
p(a*i_k+b)\ass
\begin{cases}
i_k = b & \text{if $a = 0$,}\\
i_k \ge b & \text{if $a = 1$,}\\
i_k\bmod a = b \wedge i_k \ge b & \text{otherwise}.
\end{cases}
$$
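
A direct rendering of this case analysis, reusing \texttt{simple\_cond} from the previous sketch (the record representation of $a*i_k+b$ is our own choice):
\begin{verbatim}
type simple_expr = { a : int; b : int; k : int }   (* a * i_k + b *)

let p ({ a; b; k } : simple_expr) : simple_cond =
  if a = 0 then Eq (k, b)          (* image is {b} *)
  else if a = 1 then Geq (k, b)    (* image is {b, b+1, ...} *)
  else ModEq (k, a, b, b)          (* i_k mod a = b  /\  i_k >= b *)
\end{verbatim}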
Now, suppose we are given a mapping $\kappa$ from indexed labels to natural numbers.
We will transform it into a mapping (identified, by abuse of notation, with the same symbol $\kappa$) from atoms to \imp{} expressions built with ternary expressions which depend solely on loop indices.
To that end we define an auxiliary function $\kappa^\alpha_L$, parameterised by atoms and words of simple expressions, and defined on \emph{sets} of $n$-tuples of simple expressions (with $n$ constant across each such set, i.e.\ each set is made of words all of the same length).

We will employ a bijection between words of simple expressions and indexings, given by:\footnote{Lists of simple expressions are in fact how indexings are represented in CerCo's current implementation of the compiler.}
$$
i_0\mapsto e_0,\ldots,i_{k-1}\mapsto e_{k-1} \cong e_0\cdots e_{k-1}.
$$
As usual, $\varepsilon$ denotes the empty word/indexing, and juxtaposition is used to denote word concatenation.

Every set $S$ of $n$-tuples of simple expressions falls into exactly one of the following three cases:
\begin{itemize}
\item
$S=\emptyset$.
\item
$S=\{\varepsilon\}$.
\item
There is a simple expression $e$ such that $S$ can be decomposed as $eS'+S''$, with $S'\neq \emptyset$ and no word in $S''$ starting with $e$.
\end{itemize}
Here $eS'$ denotes prepending $e$ to all elements of $S'$ and $+$ is disjoint union.
This classification can serve as the basis of a definition by recursion on $n+\card S$, where $n$ is the size of tuples in $S$ and $\card S$ is its cardinality.
Indeed, in the third case the size of tuples in $S'$ strictly decreases (and the cardinality does not increase), while for $S''$ the size of tuples stays the same but the cardinality strictly decreases.
The expression $e$ of the third case will be chosen as minimal for some total order\footnote{The specific order used does not change the correctness of the procedure, but different orders can give more or less readable results. A ``good'' order is the lexicographic one, with $a*i_k+b \le a'*i_k+b'$ if $a<a'$ or $a=a'$ and $b\le b'$.}.

The auxiliary function $\kappa^\alpha_L$ is then defined following the recursion scheme presented above:
$$
\begin{aligned}
\kappa^\alpha_L(\emptyset) &\ass 0\\
\kappa^\alpha_L(\{\varepsilon\}) &\ass \kappa(\alpha\la L\ra) \\
\kappa^\alpha_L(eS'+S'') &\ass \tern{p(e)}{\kappa^\alpha_{Le}(S')}{\kappa^\alpha_L(S'')}
\end{aligned}
$$
\noindent
Finally, the desired dependent cost mapping is defined by
$$
\kappa(\alpha)\ass\kappa^\alpha_\varepsilon(\{\,L\mid \alpha\la L\ra \text{ appears in the compiled code}\,\})
$$
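
The following OCaml sketch follows this recursion scheme literally: words are lists of simple expressions, a set of words is a duplicate-free list whose elements all have the same length, and the mapping $L\mapsto\kappa(\alpha\la L\ra)$ for the fixed atom $\alpha$ is an assumed parameter. It reuses the types and the function \texttt{p} of the previous sketches.
\begin{verbatim}
let rec kappa_aux (kappa_ind : simple_expr list -> int)
    (l : simple_expr list)                (* the word L, reversed *)
    (s : simple_expr list list) : dep_cost =
  match s with
  | [] -> Const 0                            (* kappa of the empty set *)
  | [[]] -> Const (kappa_ind (List.rev l))   (* kappa of {epsilon}     *)
  | _ ->
    (* head e, minimal in the lexicographic order of the footnote *)
    let e =
      List.fold_left
        (fun m w ->
           match w, m with
           | x :: _, None -> Some x
           | x :: _, Some y ->
               Some (if (x.a, x.b) < (y.a, y.b) then x else y)
           | [], m -> m)
        None s
      |> Option.get
    in
    (* decompose S as e S' + S'' *)
    let s' =
      List.filter_map (function x :: w when x = e -> Some w | _ -> None) s in
    let s'' =
      List.filter (function x :: _ -> x <> e | [] -> false) s in
    Tern (p e, kappa_aux kappa_ind (e :: l) s', kappa_aux kappa_ind l s'')

(* kappa(alpha) = kappa^alpha_epsilon of the set of indexings of alpha *)
let kappa (kappa_ind : simple_expr list -> int) words =
  kappa_aux kappa_ind [] words
\end{verbatim}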

\paragraph{Indexed instrumentation}
The \emph{indexed instrumentation} generalises the instrumentation presented in Section~\ref{sec:labelling}.
We described above how cost atoms can be mapped to dependent costs.
The instrumentation must also insert code dealing with the loop indices.
As instrumentation is done on the code produced by the labelling phase, all cost labels are indexed by identity indexings.
The relevant cases of the recursive definition (supposing $c$ is the cost variable) are then:
$$
\begin{aligned}
\mathcal I^\iota(\alpha\la Id_k\ra:S) &= c\ass c + \kappa(\alpha);\mathcal I^\iota(S)\\
\mathcal I^\iota(i_k : \sop{while}b\sbin{do}S) &=
  i_k \ass 0; \sop{while}b\sbin{do}(\mathcal I^\iota (S); i_k \ass i_k + 1)
\end{aligned}
$$
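
As a hypothetical worked example, suppose the loop indexed by $i_0$ is later unrolled once, so that the dependent cost computed for $\alpha$ is $\kappa(\alpha)=\tern{i_0\bmod 2=0}{n_1}{n_2}$ for some constants $n_1$ and $n_2$. The indexed instrumentation of $i_0:\sop{while}b\sbin{do}(\alpha\la Id_1\ra : S)$ then reads:
$$
i_0 \ass 0; \sop{while}b\sbin{do}(c\ass c+(\tern{i_0\bmod 2=0}{n_1}{n_2});\mathcal I^\iota(S); i_0\ass i_0+1)
$$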

\section{Notes on the implementation and further work}
\label{sec:conc}
Implementing the indexed label approach in CerCo's untrusted OCaml prototype does not introduce many new challenges beyond what has already been presented for the toy language, \imp{} with \s{goto}s.
\s{Clight}, the \s{C} fragment source language of CerCo's compilation chain~\cite{D2.1}, has several more features, but few demand changes in the indexed labelled approach.

\paragraph{Indexed loops \emph{vs}. index update instructions}
In our presentation we have indexed loops in $\iota\ell\imp$, while we hinted that later languages in the compilation chain would have specific index update instructions.
In CerCo's actual compilation chain from \s{Clight} to 8051 assembly, indexed loops are only in \s{Clight}, while from \s{Cminor} onward all languages have the same three cost-involving instructions: label emitting, index resetting and index incrementing.
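
As a hypothetical rendering (the constructor names are ours, not CerCo's), the cost-involving fragment shared by the instruction sets from \s{Cminor} onward is simply:
\begin{verbatim}
type cost_instr =
  | Emit of string     (* emit the (parametrised) cost label *)
  | IndReset of int    (* i_k := 0 *)
  | IndInc of int      (* i_k := i_k + 1 *)
\end{verbatim}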

\paragraph{Loop transformations in the front end}
We decided to implement the two loop transformations in the front end, namely in \s{Clight}.
This decision is due to user readability concerns: if costs are to be presented to the programmer, they should depend on structures written by the programmer.
If loop transformations were performed later, it would be harder to create a correspondence between loops in the control flow graph and actual loops written in the source code.
However, another solution would be to index loops in the source code and then use these indices later in the compilation chain to pinpoint explicit loops of the source code: loop indices can be used to preserve such information, just like cost labels.

\paragraph{Break and continue statements}
\s{Clight}'s loop flow control statements for breaking and continuing a loop are equivalent to appropriate \s{goto} statements.
The only differences are that they are guaranteed not to make loops multi-entry, and that once a transformation such as loop peeling is complete they must be replaced by actual \s{goto}s (which happens further down the compilation chain anyway).

\paragraph{Function calls}
Every internal function definition has its own space of loop indices.
The executable semantics must therefore save and reset the constant indexing of the current loops upon a function call, and restore it when control returns.
A peculiarity is that this cannot be attached to the actions that save and restore frames: in the case of a tail call the constant indexing needs to be saved whereas the frame does not.

\paragraph{Cost-labelled expressions}
In labelled \s{Clight}, expressions also get cost labels, due to the presence of ternary conditional expressions (and lazy logical operators, which get translated to ternary expressions too).
Adapting the indexed labelled approach to cost-labelled expressions does not pose any particular problems.

\paragraph{Simplification of dependent costs}
As previously mentioned, the na\"{i}ve application of the procedure described in Section~\ref{ssec:depcosts} produces unwieldy cost annotations.
In our implementation several transformations are used to simplify such complex dependent costs.

Disjunctions of simple conditions are closed under all logical operations, and whether such a disjunction implies a simple condition or its negation is computable.
This can be used to eliminate useless branches of dependent costs, to merge branches sharing the same value, and to simplify the third form of simple condition.
Examples of the three transformations are, respectively:
\begin{itemize}
\item $
\verb+(_i_0 == 0)?+x\verb+:(_i_0 >= 1)?+y\verb+:+z
\mapsto
\verb+(_i_0 == 0)?+x\verb+:+y,
$
\item $
c\texttt{?}x\verb+:(+d\texttt{?}x\texttt{:}y\verb+)+
\mapsto
\texttt{(}c\texttt{ || }d\texttt{)?}x\texttt{:}y,
$
\item \begin{tabular}[t]{np{\linewidth}n}
$\verb+(_i_0 == 0)?+x\verb+:(_i_0 % 2 == 0 && _i_0 >= 2)?+y\verb+:+z
\mapsto$ \\\hfill
$\verb+(_i_0 == 0)?+x\verb+:(_i_0 % 2 == 0)?+y\verb+:+z.
$\end{tabular}
\end{itemize}
The second transformation tends to accumulate disjunctions, to the detriment of readability.
A further transformation therefore swaps the two branches of a ternary expression whenever the negation of the condition can be expressed with fewer clauses.
For example:
$$ \verb+(_i_0 % 3 == 0 || _i_0 % 3 == 1)?+x\verb+:+y \mapsto
\verb+(_i_0 % 3 == 2)?+y\verb+:+x.
$$
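
The branch-merging transformation, for instance, admits a compact sketch on dependent costs whose guards are disjunctions of simple conditions (which, as noted above, are closed under the logical operations); \texttt{simple\_cond} is the type from the earlier sketch, and the code is illustrative, not CerCo's:
\begin{verbatim}
(* Guards are disjunctions p1 || ... || pn of simple conditions. *)
type disj = simple_cond list

type dcost =
  | DConst of int
  | DTern of disj * dcost * dcost   (* (p1 || ... || pn) ? e : f *)

(* Bottom-up rewriting of  c ? x : (d ? x : y)  into  (c || d) ? x : y:
   if c holds we get x, otherwise d decides between x and y, which is
   exactly what the merged guard expresses. *)
let rec merge (e : dcost) : dcost =
  match e with
  | DConst _ -> e
  | DTern (c, e1, e2) ->
    (match merge e1, merge e2 with
     | x, DTern (d, x', y) when x = x' -> DTern (c @ d, x, y)
     | x, y -> DTern (c, x, y))
\end{verbatim}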

\paragraph{Updates to the Frama-C cost plugin}
CerCo's Frama-C~\cite{framac} cost plugin has been updated to take into account our new notion of dependent costs.
The Frama-C framework expands ternary expressions to branch statements, introducing temporaries along the way.
This makes the task of analyzing ternary cost expressions rather daunting.
It was deemed necessary to provide an option in the compiler to use actual branch statements for cost annotations rather than ternary expressions, so that at least Frama-C's use of temporaries in cost annotations could be avoided.
The cost analysis carried out by the plugin now takes such dependent costs into account.

The only limitation (which actually simplifies the code) is that, within a dependent cost, simple conditions taking the modulus of the same loop index must all use the same modulus.
This corresponds to a reasonable limitation on the number of times loop unrolling may be applied to the same loop: at most once.

\paragraph{Further work}
For the time being, indexed labels are implemented only in the untrusted OCaml compiler; they are not yet present in the Matita code.
Porting them should pose no significant problem.
Once ported, the task of proving properties about them in Matita can begin.

Because most of the executable operational semantics of the languages across the front end and the back end are oblivious to cost labels, the bulk of the semantic preservation proofs that still need to be done should not become any harder because of indexed labels.
The only trickier point that we foresee is in the translation of \s{Clight} to \s{Cminor}, where we pass from structured indexed loops to atomic instructions on loop indices.

An invariant which should probably be proved and provably preserved along the compilation chain is the non-overlap of indexings for the same atom.
Then, supposing cost correctness for the unindexed approach, the indexed one just needs the amended proof that
$$\forall C\text{ constant indexing}.\forall \alpha\la I\ra\text{ appearing in the compiled code}.
  \kappa(\alpha)\circ (I\circ C) = \kappa(\alpha\la I \ra).
$$
Here, $C$ represents a snapshot of loop indices in the compiled code, while $I\circ C$ is the corresponding snapshot in the source code.
Semantics preservation will ensure that when, with snapshot $C$, we emit $\alpha\la I\ra$ (that is, we have $\alpha\la I\circ C\ra$ in the trace), $\alpha$ must also be emitted in the source code with indexing $I\circ C$, so the cost $\kappa(\alpha)\circ (I\circ C)$ applies.
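
For a hypothetical concrete instance, take $I=(i_0\mapsto 2*i_0+1)$ (say, the odd-iteration copy of a loop unrolled once) and the snapshot $C=(i_0\mapsto 3)$ in the compiled code, so that $I\circ C=(i_0\mapsto 7)$. Evaluating $\kappa(\alpha)$ at $i_0=7$ satisfies the guard $p(2*i_0+1)=(i_0\bmod 2=1\wedge i_0\ge 1)$ and, granted the non-overlap of indexings for $\alpha$, selects precisely the branch whose value is $\kappa(\alpha\la 2*i_0+1\ra)$, as the equation requires.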

Aside from carrying over the proofs, we would like to extend the approach to more loop transformations.
Important examples are loop inversion (where a for loop is reversed, usually to make iterations appear to be truly independent) and loop interchange (where two nested loops are swapped, usually to expose more loop invariants or to enhance strength reduction).
These introduce interesting changes to the approach, as we would have indexings such as:
$$i_0\mapsto n - i_0\quad\text{or}\quad i_0\mapsto i_1, i_1\mapsto i_0.$$
In particular, dependency on actual program variables would enter the picture, as indexings would depend on the number of iterations of a well-behaved guarded loop (the $n$ in the first example).

Finally, as stated in the introduction, the approach should allow some integration of techniques for cache analysis, a possibility that has so far been set aside, as the standard 8051 target architecture of the CerCo project lacks a cache.
Two possible developments for this line of work present themselves:
\begin{enumerate}
\item
One could extend the development to 8051 variants, some of which have been produced with a cache.
\item
One could make the compiler implement its own cache. This cannot apply to \textsc{ram} accesses of the standard 8051 architecture, as the difference in cost between accessing the two types of \textsc{ram} is only one clock cycle, which makes any implementation of a cache counterproductive.
For this proposal we could either artificially change the model's \textsc{ram} access costs, just for the sake of possible future adaptations to other architectures, or model access to an external memory by means of the serial port.
\end{enumerate}


%
% \newpage
%
% \includepdf[pages={-}]{plugin.pdf}
%
%
% \newpage
%
% \includepdf[pages={-}]{fopara.pdf}
%
%
% \newpage
%
% \includepdf[pages={-}]{tlca.pdf}
%
% \bibliographystyle{plain}
\bibliography{bib}

\end{document}