\documentclass[11pt,epsf,a4wide]{article}
\usepackage{../../style/cerco}
\usepackage{pdfpages}

\usepackage{graphics}

% For LLNCS, comment the above and use
% \documentclass{llncs}




\RequirePackage[latin1]{inputenc}

% The various packages and functions that we use
\usepackage[english]{babel}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{xspace}
\usepackage{latexsym}
\usepackage{url}
\usepackage{xspace}
%\usepackage{fancyvrb}
%\usepackage[all]{xy}
% packages for LNCS
%\usepackage{semantic}
%\usepackage{cmll}
% Packages for RR
\usepackage{graphics,color}
\RequirePackage[latin1]{inputenc}
\usepackage{array}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newenvironment{comment}{{\bf MORE WORK:}}

\newenvironment{restate-proposition}[2][{}]{\noindent\textbf{Proposition~{#2}}
\;\textbf{#1}\
}{\vskip 1em}

\newenvironment{restate-theorem}[2][{}]{\noindent\textbf{Theorem~{#2}}\;\textbf{
#1}\
}{\vskip 1em}

\newenvironment{restate-corollary}[2][{}]{\noindent\textbf{Corollary~{#2}}
\;\textbf{#1}\
}{\vskip 1em}

\newcommand{\myparagraph}[1]{\medskip\noindent\textbf{#1}}

\newcommand{\Proofitemb}[1]{\medskip \noindent {\bf #1\;}}
\newcommand{\Proofitemfb}[1]{\noindent {\bf #1\;}}
\newcommand{\Proofitem}[1]{\medskip \noindent $#1\;$}
\newcommand{\Proofitemf}[1]{\noindent $#1\;$}
\newcommand{\Defitem}[1]{\smallskip \noindent $#1\;$}
\newcommand{\Defitemt}[1]{\smallskip \noindent {\em #1\;}}
\newcommand{\Defitemf}[1]{\noindent $#1\;$}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newcommand{\eqdef}{=_{\text{def}}}
\newcommand{\concat}{\cdot}%%{\mathbin{+}}
\newcommand{\Int}{\mathit{int}}
\newcommand{\nat}{\mathit{nat}}
\newcommand{\String}{\mathit{string}}
\newcommand{\Ident}{\mathit{ident}}
\newcommand{\Block}{\mathit{block}}
\newcommand{\Signature}{\mathit{signature}}

\newcommand{\pc}{\mathit{pc}}
\newcommand{\estack}{\mathit{estack}}
\newcommand{\Error}{\epsilon}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% --------------------------------------------------------------------- %
% Proof rule.                                                           %
% --------------------------------------------------------------------- %

\newcommand{\staterule}[3]{%
  $\begin{array}{@{}l}%
   \mbox{#1}\\%
   \begin{array}{c}
   #2\\
   \hline
   \raisebox{0ex}[2.5ex]{\strut}#3%
   \end{array}
  \end{array}$}

\newcommand{\GAP}{2ex}

\newcommand{\recall}[2]{%
 $\begin{array}{c}
 #1\\
 \hline
 \raisebox{0ex}[2.5ex]{\strut}#2%
 \end{array}$}

\newcommand{\hbra}{\noindent\hbox to \textwidth{\leaders\hrule height1.8mm
depth-1.5mm\hfill}}
\newcommand{\hket}{\noindent\hbox to \textwidth{\leaders\hrule
height0.3mm\hfill}}
\newcommand{\ratio}{.3}

\newenvironment{display}[1]{\begin{tabbing}
  \hspace{1.5em} \= \hspace{\ratio\linewidth-1.5em} \= \hspace{1.5em} \= \kill
  \noindent\hbra\\[-.5em]
  {\ }\textsc{#1}\\[-.8ex]
  \hbox to \textwidth{\leaders\hrule height1.6mm depth-1.5mm\hfill}\\[-.8ex]
  }{\\[-.8ex]\hket
  \end{tabbing}}


\newcommand{\sbline}{\hfill\smash[t]{\rule[1.5em]{\textwidth}{0.2ex}
\hfill\hspace*{0ex}}}
\newcommand{\sline}{\hfill\smash[t]{\rule[1.5em]{\textwidth}{0.1ex}
\hfill\hspace*{0ex}}}
\newcommand{\sentry}[2]{\>$#1$\>\ \smash[t]{\vrule width 0.2mm height
    1.2\baselineskip depth 1.5\baselineskip}\>#2}

\newcommand{\entry}[2]{\>$#1$\>\>#2}
\newcommand{\clause}[2]{$#1$\>\>#2}
\newcommand{\category}[2]{\clause{#1::=}{#2}}
\newcommand{\subclause}[1]{\>\>\>#1}
\newcommand{\redrule}[3]{$#1$\>\>$#2$\>\>\>#3}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% environments

% To be commented for LNCS
 \newtheorem{theorem}{Theorem}
 \newtheorem{fact}[theorem]{Fact}
 \newtheorem{definition}[theorem]{Definition}
 \newtheorem{lemma}[theorem]{Lemma}
 \newtheorem{corollary}[theorem]{Corollary}
 \newtheorem{proposition}[theorem]{Proposition}
 \newtheorem{example}[theorem]{Example}
 \newtheorem{exercise}[theorem]{Exercise}
 \newtheorem{remark}[theorem]{Remark}
 \newtheorem{question}[theorem]{Question}
 \newtheorem{proviso}[theorem]{Proviso}
 \newtheorem{conjecture}[theorem]{Conjecture}

% proofs

\newcommand{\Proof}{\noindent {\sc Proof}. }
\newcommand{\Proofhint}{\noindent {\sc Proof hint}. }
% To be commented for LNCS
\newcommand{\qed}{\hfill${\Box}$}
\newcommand{\EndProof}{\qed}

% figure environment

\newcommand{\Figbar}{{\center \rule{\hsize}{0.3mm}}}
% horizontal thinner line for figures
\newenvironment{figureplr}[1][t]{\begin{figure}[#1] \Figbar}{\Figbar \end{figure}}
% environment for figures
%       ************ Macros for mathematical symbols *************
% Style

\newcommand{\cl}[1]{{\cal #1}}          % \cl{R} to make R calligraphic
\newcommand{\la}{\langle}               % the brackets for pairing (see also \pair)
\newcommand{\ra}{\rangle}

\newcommand{\lf}{\lfloor}
\newcommand{\rf}{\rfloor}
\newcommand{\ul}[1]{\underline{#1}}     % to underline
\newcommand{\ol}[1]{\overline{#1}}      % to overline
\newcommand{\ok}{~ok}                   % well-formed context

% Syntax

\newcommand{\Gives}{\vdash}             % in a type judgment
\newcommand{\IGives}{\vdash_{I}}        % intuitionistic provability
\newcommand{\AIGives}{\vdash_{{\it AI}}} % affine-intuitionistic provability
\newcommand{\CGives}{\vdash_{C}}        % affine-intuitionistic confluent provability


\newcommand{\Models}{\mid \! =}              % models

\newcommand{\emptycxt}{\On}              % empty context
\newcommand{\subs}[2]{[#1 / #2]}
\newcommand{\sub}[2]{[#2 / #1]}         % substitution \sub{x}{U} gives [U/x]

\newcommand{\Sub}[3]{[#3 / #2]#1}       % substitution with three arguments \Sub{V}{x}{U}

\newcommand{\lsub}[2]{#2 / #1}          % substitution \lsub{x}{U} gives U/x, to be used in a list

\newcommand{\impl}{\supset}
\newcommand{\arrow}{\rightarrow}        % right thin arrow
\newcommand{\trarrow}{\stackrel{*}{\rightarrow}}        % transitive closure
%\newcommand{\limp}{\makebox[5mm]{\,$- \! {\circ}\,$}}   % linear
                                % implication
\newcommand{\limp}{\multimap} % linear implication
\newcommand{\bang}{\, !}
% LNCS
%\newcommand{\bang}{\oc}
\newcommand{\limpe}[1]{\stackrel{#1}{\multimap}}
\newcommand{\hyp}[3]{#1:(#2, #3)}
\newcommand{\letm}[3]{{\sf let} \ ! #1 = #2 \ {\sf in} \ #3}    % modal let
\newcommand{\lets}[3]{{\sf let} \ #1 = #2 \ {\sf in} \ #3}    % simple let
\newcommand{\letp}[3]{{\sf let} \ \S #1 = #2 \ {\sf in} \ #3}    % paragraph let
\newcommand{\tertype}{{\bf 1}}
\newcommand{\behtype}{{\bf B}}
\newcommand{\bt}[1]{{\it BT}(#1)}       % Boehm tree
\newcommand{\cxt}[1]{#1[~]}             % context with one hole
\newcommand{\pr}{\parallel}             % parallel ||
\newcommand{\Nat}{\mathbf{N}}                 % natural numbers
\newcommand{\Natmax}{\mathbf{N}_{{\it max}}}  % natural numbers with minus infinity
\newcommand{\Rat}{\mathbf{Q}^{+}}                 % non-negative rationals
\newcommand{\Ratmax}{\mathbf{Q}^{+}_{{\it max}}}  % non-negative rationals with minus infinity
\newcommand{\Alt}{ \mid\!\!\mid  }
\newcommand{\isum}{\oplus}
\newcommand{\csum}{\uplus}              % context sum
\newcommand{\dpar}{\mid\!\mid}
                                        % for the production of a grammar containing \mid
\newcommand{\infer}[2]{\begin{array}{c} #1 \\ \hline #2 \end{array}}
                                        % to make a centered inference rule

% (Meta-)Logic

\newcommand{\bool}{{\sf bool}}          % boolean values
\newcommand{\Or}{\vee}                  % disjunction
\newcommand{\OR}{\bigvee}               % big disjunction
\newcommand{\AND}{\wedge}               % conjunction
\newcommand{\ANDD}{\bigwedge}           % big conjunction
\newcommand{\Arrow}{\Rightarrow}        % right double arrow
\newcommand{\IFF}{\mbox{~~iff~~}}       % iff in roman and with spaces
\newcommand{\iffArrow}{\Leftrightarrow} % logical equivalence

% Semantics

\newcommand{\dl}{[\![}                  % semantic [[
\newcommand{\dr}{]\!]}                  % semantic ]]
\newcommand{\lam}{{\bf \lambda}}        % semantic lambda


% The equivalences for this paper

% the usual ones
\newcommand{\ubis}{\approx^u}          % usual labelled weak bisimulation
\newcommand{\uabis}{\approx^{u}_{ccs}} % usual labelled weak bisimulation on CCS

% the contextual convergence-sensitive one
\newcommand{\cbis}{\approx}        % convergence-sensitive bisimulation
\newcommand{\cabis}{\approx_{ccs}}  % convergence-sensitive bisimulation on CCS

% the labelled convergence-sensitive one
\newcommand{\lcbis}{\approx^{\ell}} %
\newcommand{\lcabis}{\approx^{\ell}_{ccs}} % labelled convergence-sensitive bisimulation on CCS
\newcommand{\lcbiswrong}{\approx^{\ell \Downarrow}} %




\newcommand{\maytest}{=_{\Downarrow}}
\newcommand{\musttest}{=_{\Downarrow_{S}}}



% Sets

\newcommand{\prt}[1]{{\cal P}(#1)}      % parts of a set
\newcommand{\finprt}[1]{{\cal P}_{fin}(#1)}% finite parts
\newcommand{\finprtp}[1]{{\cal P}_{fin}^{+}(#1)}% non-empty finite parts
\newcommand{\union}{\cup}               % union
\newcommand{\inter}{\cap}               % intersection
\newcommand{\Union}{\bigcup}            % big union
\newcommand{\Inter}{\bigcap}            % big intersection
\newcommand{\cpl}[1]{#1^{c}}            % complement
\newcommand{\card}{\sharp}              % cardinality
\newcommand{\minus}{\backslash}         % set difference
\newcommand{\sequence}[2]{\{#1\}_{#2}}  % e.g. \sequence{d_n}{n\in \omega}
\newcommand{\comp}{\circ}               % functional composition
%\newcommand{\oset}[1]{\{#1\}}            % set enumeration
\newcommand{\mset}[1]{\{\! | #1 |\!\}}  % pseudo-set notation {| |}

% Domains

\newcommand{\two}{{\bf O}}              % Sierpinski space
\newcommand{\join}{\vee}                % join
\newcommand{\JOIN}{\bigvee}             % big join
\newcommand{\meet}{\wedge}              % meet
\newcommand{\MEET}{\bigwedge}           % big meet
\newcommand{\dcl}{\downarrow}           % down closure
\newcommand{\ucl}{\uparrow}             % up closure
\newcommand{\conv}{\downarrow}          % syntactic convergence predicate (postfix)
\newcommand{\diver}{\uparrow}           % diverging term
\newcommand{\Conv}{\Downarrow}          % semantic convergence predicate (postfix)
\newcommand{\SConv}{\Downarrow_{S}}          % semantic convergence predicate (postfix)
\newcommand{\CConv}{\Downarrow_{C}}
\newcommand{\Diver}{\Uparrow}           % diverging map
\newcommand{\cpt}[1]{{\cal K}(#1)}      % compacts, write \cpt{D}
\newcommand{\ret}{\triangleleft}        % retract
\newcommand{\nor}{\succeq}
\newcommand{\prj}{\underline{\ret}}     % projection
\newcommand{\parrow}{\rightharpoonup}   % partial function space
\newcommand{\ub}[1]{{\it UB}(#1)}       % upper bounds
\newcommand{\mub}[1]{{\it MUB}(#1)}     % minimal upper bounds
\newcommand{\lift}[1]{(#1)_{\bot}}      % lifting
\newcommand{\forget}[1]{\underline{#1}} % forgetful translation

%\newcommand{\rel}[1]{\;{\cal #1}\;}     % infix relation (calligraphic)
\newcommand{\rl}[1]{\;{\cal #1}\;}             % infix relation
\newcommand{\rel}[1]{{\cal #1}}         % calligraphic relation with no
                                        % extra space
\newcommand{\per}[1]{\;#1 \;}
\newcommand{\wddagger}{\natural}  % weak suspension
%\newcommand{\wddagger}{=\!\!\!\!\parallel}  % weak suspension
% Categories

\newcommand{\pair}[2]{\langle #1 , #2 \rangle} % pairing \pair{x}{y}, do not use < >.

%               ******* Notation for the $\pi$-calculus *********
% Syntax:


\newcommand{\fn}[1]{{\it fn}(#1)}                       % free names
\newcommand{\bn}[1]{{\it bn}(#1)}                       % bound names
\newcommand{\names}[1]{{\it n}(#1)}                     % names
\newcommand{\true}{{\sf t}}                             % true
\newcommand{\false}{{\sf f}}                            % false
\newcommand{\pio}{\pi_1}                                % 1-receptor calculus
\newcommand{\pioo}{\pi_{1}^{r}}
\newcommand{\piom}{\pi_{1}^{-}}                         % 1-receptor calculus without match
\newcommand{\pioi}{\pi_{1I}}                    % 1-receptor I-calculus
\newcommand{\pifo}{\pi_{\w{1f}}}                                % functional calculus
\newcommand{\pilo}{\pi_{\w{1l}}}                                % located calculus
\newcommand{\sort}[1]{{\it st}(#1)}                     % sort
\newcommand{\ia}[1]{{\it ia}(#1)}                     % sort
\newcommand{\ite}[3]{{\sf if~} #1 {\sf ~then~} #2 {\sf ~else~} #3}      % if then else
\newcommand{\casep}[2]{{\sf case}^{\times}(#1, \pair{x}{y}\Arrow#2)}      % case on pairs
\newcommand{\casel}[3]{{\sf case}^{L}(#1, #2, \s{cons}(x,y)\Arrow#3)}      % case on lists
\newcommand{\caseb}[3]{{\sf case}^{b}(#1, #2, \s{cons}(x,y)\Arrow#3)}      % case on booleans
\newcommand{\nil}{{\sf nil}}
\newcommand{\cons}{{\sf cons}}
\newcommand{\idle}[1]{{\it Idle}(#1)}                   % idle process
\newcommand{\conf}[1]{\{ #1 \}}                         % configuration
\newcommand{\link}[2]{#1 \mapsto #2}                    % link a -> b
\newcommand{\mand}{\mbox{ and }}
\newcommand{\dvec}[1]{\tilde{{\bf #1}}}                 % double vector
\newcommand{\erloc}[1]{{\it er}_{l}(#1)}                % location erasure
\newcommand{\w}[1]{{\it #1}}    % to write in math style
\newcommand{\vcb}[1]{{\bf #1}}
\newcommand{\lc}{\langle\!|}
\newcommand{\rc}{|\!\rangle}
\newcommand{\obj}[1]{{\it obj}(#1)}
\newcommand{\move}[1]{{\sf move}(#1)}
\newcommand{\qqs}[2]{\forall\, #1\;\: #2}
\newcommand{\qtype}[4]{\forall #1 :  #2 . (#4,#3)}
\newcommand{\xst}[2]{\exists\, #1\;\: #2}
\newcommand{\xstu}[2]{\exists\, ! #1\;\: #2}
\newcommand{\dpt}{\,:\,}
\newcommand{\cond}[3]{\mathsf{if}\ #1\ \mathsf{then}\ #2\ \mathsf{else}\ #3}
\newcommand{\s}[1]{{\sf #1}}    % sans-serif
\newcommand{\vc}[1]{{\bf #1}}
\newcommand{\lnorm}{\lbrack\!\lbrack}
\newcommand{\rnorm}{\rbrack\!\rbrack}
\newcommand{\sem}[1]{\underline{#1}}
\newcommand{\tra}[1]{\langle #1 \rangle}
\newcommand{\trb}[1]{[ #1 ]}
\newcommand{\squn}{\mathop{\scriptstyle\sqcup}}
\newcommand{\lcro}{\langle\!|}
\newcommand{\rcro}{|\!\rangle}
\newcommand{\semi}[1]{\lcro #1\rcro}
\newcommand{\sell}{\,\ell\,}
\newcommand{\SDZ}[1]{\marginpar{\textbf{SDZ:} {#1}}}

\newcommand{\when}[3]{{\sf when}~#1~{\sf then}~#2~{\sf else}~#3}
\newcommand{\wthen}[2]{{\sf when}~#1~{\sf then}~#2~}
\newcommand{\welse}[1]{{\sf else}~#1}

% For the double arrow, one must add:
%      \usepackage{mathtools}

\newcommand{\act}[1]{\xrightarrow{#1}} % labelled action low % high

\newcommand{\lact}[1]{\stackrel{#1}{\makebox[5mm]{\,$- \! {\circ}\,$}}}

\newcommand{\set}[1]{\{#1\}}
\newcommand{\pst}[2]{{\sf pset}(#1,#2)}
\newcommand{\st}[2]{{\sf set}(#1,#2)}
\newcommand{\wrt}[2]{{\sf w}(#1,#2)}

\newcommand{\chtype}[2]{{\it Ch_{#1}(#2)}}
\newcommand{\rgtype}[2]{{\it {\sf Reg}_{#1} #2}}

\newcommand{\get}[1]{{\sf get}(#1)}

%\newcommand{\wact}[1]{\xRightarrow{#1}} % weak labelled action low high

%\newcommand{\mact}[1]{\xrightarrow{#1}_{m}} % labelled action low % high

%\newcommand{\wmact}[1]{\xRightarrow{#1}_{m}} % weak labelled action low high

%\newcommand{\act}[1]{\stackrel{#1}{\rightarrow}} % labelled action low
                                %%% high

\newcommand{\acteq}[1]{\stackrel{#1}{\leadsto}} % labelled action low
                                %%% high


%\newcommand{\actI}[1]{\stackrel{#1}{\rightarrow_{1}}} % labelled action low
\newcommand{\actI}[1]{\xrightarrow{#1}_{1}}

%\newcommand{\actII}[1]{\stackrel{#1}{\rightarrow_{2}}} % labelled action low
\newcommand{\actII}[1]{\xrightarrow{#1}_{2}}


\newcommand{\wact}[1]{\stackrel{#1}{\Rightarrow}} % weak labelled action low high
\newcommand{\wactI}[1]{\stackrel{#1}{\Rightarrow_{1}}} % weak labelled action low high
\newcommand{\wactII}[1]{\stackrel{#1}{\Rightarrow_{2}}} % weak labelled action low high


\newcommand{\mact}[1]{\stackrel{#1}{\rightarrow_{m}}} % labelled action low
% high
\newcommand{\wmact}[1]{\stackrel{#1}{\Rightarrow_{m}}} % weak labelled action low high

%\newcommand{\lact}[1]{\stackrel{#1}{\leftarrow}}
\newcommand{\lwact}[1]{\stackrel{#1}{\Leftarrow}}



\newcommand{\eval}{\Downarrow}
\newcommand{\Eval}[1]{\Downarrow^{#1}}


\newcommand{\Z}{{\bf Z}}
\newcommand{\Real}{\mathbb{R}^{+}}
\newcommand{\Return}{\ensuremath{\mathtt{return}}\xspace}
\newcommand{\Stop}{\ensuremath{\mathtt{stop}}\xspace}
\newcommand{\Wait}{\ensuremath{\mathtt{wait}}\xspace}
\newcommand{\Read}{\ensuremath{\mathtt{read}}\xspace}
\newcommand{\Write}{\ensuremath{\mathtt{write}}\xspace}
\newcommand{\Yield}{\ensuremath{\mathtt{yield}}\xspace}
\newcommand{\Next}{\ensuremath{\mathtt{next}}\xspace}
\newcommand{\Load}{\ensuremath{\mathtt{load}}\xspace}
\newcommand{\Call}{\ensuremath{\mathtt{call}}\xspace}
\newcommand{\Tcall}{\ensuremath{\mathtt{tcall}}\xspace}
\newcommand{\Pop}{\ensuremath{\mathtt{pop}}\xspace}
\newcommand{\Build}{\ensuremath{\mathtt{build}}\xspace}
\newcommand{\Branch}{\ensuremath{\mathtt{branch}}\xspace}
\newcommand{\Goto}{\ensuremath{\mathtt{goto}}\xspace}

\newcommand{\hatt}[1]{#1^{+}}
\newcommand{\Of}{\mathbin{\w{of}}}

\newcommand{\susp}{\downarrow}
\newcommand{\lsusp}{\Downarrow_L}
\newcommand{\wsusp}{\Downarrow}
\newcommand{\commits}{\searrow}


\newcommand{\spi}{S\pi}


\newcommand{\pres}[2]{#1\triangleright #2} % TCCS else-next (alternative)
% \newcommand{\pres}[2]{ \lfloor #1 \rfloor (#2)}  % TCCS else-next
\newcommand{\present}[3]{{\sf present} \ #1 \ {\sf do } \ #2 \ {\sf  else} \ #3}


\newcommand{\tick}{{\sf tick}}          % tick action


\newcommand{\sbis}{\equiv_L}
\newcommand{\emit}[2]{\ol{#1}#2}
%\newcommand{\present}[4]{#1(#2).#3,#4}
\newcommand{\match}[4]{[#1=#2]#3,#4}       % pi-equality

\newcommand{\matchv}[4]{[#1 \unrhd #2]#3,#4}

\newcommand{\new}[2]{\nu #1 \ #2}
\newcommand{\outact}[3]{\new{{\bf #1}}{\emit{#2}{#3}}}
\newcommand{\real}{\makebox[5mm]{\,$\|\!-$}}% realisability relation

\newcommand{\regterm}[2]{{\sf reg}_{#1} #2}
\newcommand{\thread}[1]{{\sf thread} \ #1}
\newcommand{\store}[2]{(#1 \leftarrow #2)}
\newcommand{\pstore}[2]{(#1 \Leftarrow #2)}

\newcommand{\regtype}[2]{{\sf Reg}_{#1} #2}
\newcommand{\uregtype}[3]{{\sf Reg}_{#1}(#2, #3)}
\newcommand{\urtype}[2]{{\sf Reg}(#1, #2)}

\newcommand{\upair}[2]{[#1,#2]}
\newcommand{\letb}[3]{\mathsf{let}\;\oc #1 = #2\;\mathsf{in}\;#3}

\newcommand{\vlt}[1]{{\cal V}(#1)}
\newcommand{\prs}[1]{{\cal P}(#1)}

\newcommand{\imp}{{\sf Imp}}            % Imp language
\newcommand{\vm}{{\sf Vm}}              % virtual machine language
\newcommand{\mips}{{\sf Mips}}          % Mips language
\newcommand{\C}{{\sf C}}                % C language
\newcommand{\Clight}{{\sf Clight}}        % Clight language
\newcommand{\Cminor}{{\sf Cminor}}
\newcommand{\RTLAbs}{{\sf RTLAbs}}
\newcommand{\RTL}{{\sf RTL}}
\newcommand{\ERTL}{{\sf ERTL}}
\newcommand{\LTL}{{\sf LTL}}
\newcommand{\LIN}{{\sf LIN}}
\newcommand{\access}[1]{\stackrel{#1}{\leadsto}}
\newcommand{\ocaml}{{\sf ocaml}}
\newcommand{\coq}{{\sf Coq}}
\newcommand{\compcert}{{\sf CompCert}}
%\newcommand{\cerco}{{\sf CerCo}}
\newcommand{\cil}{{\sf CIL}}
\newcommand{\scade}{{\sf Scade}}
\newcommand{\absint}{{\sf AbsInt}}
\newcommand{\framac}{{\sf Frama-C}}
\newcommand{\powerpc}{{\sf PowerPC}}
\newcommand{\lustre}{{\sf Lustre}}
\newcommand{\esterel}{{\sf Esterel}}
\newcommand{\ml}{{\sf ML}}

\newcommand{\codeex}[1]{\texttt{#1}}   % code example

\bibliographystyle{abbrv}

\title{
INFORMATION AND COMMUNICATION TECHNOLOGIES\\
(ICT)\\
PROGRAMME\\
\vspace*{1cm}Project FP7-ICT-2009-C-243881 \cerco{}}

\date{ }
\author{}
%>>>>>> new commands used in this document
% \usepackage[nohyperref,nosvn]{mystyle}

\usepackage{multirow}
\newcolumntype{b}{@{}>{{}}}
\newcolumntype{B}{@{}>{{}}c<{{}}@{}}
\newcolumntype{h}[1]{@{\hspace{#1}}}
\newcolumntype{L}{>{$}l<{$}}
\newcolumntype{C}{>{$}c<{$}}
\newcolumntype{R}{>{$}r<{$}}
\newcolumntype{S}{>{$(}r<{)$}}
\newcolumntype{n}{@{}}
\newcommand{\spanr}[2]{\multicolumn{1}{Rn}{\multirow{#1}{*}{(#2)}}}
\def\nocol{\multicolumn{1}{ncn}{}}

\usepackage[disable, colorinlistoftodos]{todonotes}
\usepackage{enumerate}
\usepackage{tikz}

\newcommand{\tern}[3]{#1\mathrel ? #2 : #3}
\newcommand{\sop}[1]{\s{#1}\ }
\newcommand{\sbin}[1]{\ \s{#1}\ }
\newcommand{\Ell}{\mathcal L}
\newcommand{\alphab}{\boldsymbol\alpha}
\newcommand{\betab}{\boldsymbol\beta}
\newcommand{\gramm}{\mathrel{::=}}
\newcommand{\ass}{\mathrel{:=}}

\renewcommand{\to}[1][]{\stackrel{#1}{\rightarrow}}

\newcommand{\eg}{\emph{e.g.\ }}
\newcommand{\ie}{\emph{i.e.\ }}

\newcommand{\inde}{\hspace{20pt}}

\usetikzlibrary{decorations.pathreplacing}
\newcommand{\tikztarget}[2]{%
  \tikz[remember picture, baseline={(#1.base)}]{
  \node (#1) [inner sep = 0pt]{#2};}}
\newcommand{\tikztargetm}[2]{%
  \tikz[remember picture, baseline={(#1.base)}]{
  \node (#1) [inner sep = 0pt]{$#2$};}}
%<<<<<<<<<<<<
\title{Indexed Labels for Loop Iteration Dependent Costs}
\author{Paolo Tranquilli}
\begin{document}
\maketitle
\begin{abstract}
We present an extension to the labelling approach for lifting resource
consumption information from compiled to source code~\cite{D2.1}. This
approach consists in inserting cost labels at key points of the source code and
keeping track of them during compilation. However, the plain labelling approach
loses preciseness when differences arise as to the cost of the same portion of
code, whether due to code transformations such as loop optimisations or to advanced
architecture features (\eg cache). Our approach addresses this weakness, allowing us
to retain preciseness even when applying some loop transformations that rearrange
the iterations of a loop (namely loop peeling and unrolling). It consists in
formally indexing cost labels with the iterations of the containing loops they
occur in within the source code. These indices can be transformed during
compilation, and when lifted back to source code they produce dependent costs.

The proposed changes have been implemented in CerCo's untrusted prototype compiler
from a large fragment of C to 8051 assembly~\cite{D2.2}.
\end{abstract}


\listoftodos

\section{Introduction}
In~\cite{D2.1}, Amadio \emph{et al.} propose an approach for building a compiler for a large fragment of the \textsc{c} programming language.
The novelty of their proposal lies in the fact that their design is capable of lifting execution cost information from the compiled code and presenting it to the user.
This idea is foundational for the CerCo project, which strives to produce a mechanically certified version of such a compiler.

To summarise, Amadio's proposal consists of `decorations' on the source code, along with the insertion of labels at key points.
These labels are preserved as compilation progresses, from one intermediate language to another.
Once the final object code is produced, such labels should correspond to the parts of the compiled code that have a constant cost.

Two properties must hold of any cost estimate.
The first property, paramount to the correctness of the method, is \emph{soundness}: the actual execution cost must be bounded by the estimate.
In the labelling approach, this is guaranteed if every loop in the control flow of the compiled code passes through at least one cost label.
The second property, optional but desirable, is \emph{preciseness}: the estimate \emph{is} the actual cost.
In the labelling approach, this will be true if, for every label, every possible execution of the compiled code starting from such a label yields the same cost before hitting another one.
In simple architectures such as the 8051 micro-controller this can be guaranteed by placing labels at the start of any branch in the control flow, and by ensuring that no labels are duplicated.

The reader should note that the above-mentioned requirements must hold when executing the code obtained at the end of the compilation chain.
So even if one is careful about injecting the labels at suitable places in the source code, the requirements might still fail because of two main obstacles:
\begin{itemize}
\item
The compilation process introduces important changes in the control flow, inserting loops or branches.
An example is the insertion of functions in the source code replacing instructions that are unavailable in the target architecture.
This requires loops to be inserted (for example, for multi-word division and generic shift in the 8051 architecture), or effort to be spent in providing unbranching translations of higher-level instructions~\cite{D2.2}.
\item
Even when the compiled code \emph{does}---as far as the syntactic control flow graph is concerned---respect the conditions for soundness and preciseness, the cost of blocks of instructions might not be independent of context, so that different passes through a label might have different costs.
This becomes a concern if one wishes to apply the approach to more complex architectures, for example one with caching or pipelining.
\end{itemize}
The first point unveils a weakness of the current labelling approach when it comes to some common code transformations performed along a compilation chain.
In particular, most \emph{loop optimisations} are disruptive, in the sense outlined in the first bullet point above.
An example optimisation of this kind is \emph{loop peeling}.
This optimisation is employed by compilers in order to trigger other optimisations, for example, dead code elimination or invariant code motion.
Here, a first iteration of the loop is hoisted out of the body of the loop, possibly being assigned a different cost than later iterations.

The second bullet point above highlights another weakness. Several tools allow one to predict, up to a certain extent, the behaviour of the cache.
For example, the well-known tool \s{aiT}~\cite{absint}---based on abstract interpretation---allows the user to estimate the worst-case execution time (\textsc{wcet}) of a piece of source code, taking into account advanced features of the target architecture. While
such a tool is not fit for the compositional approach that is central to CerCo's project\footnote{\s{aiT} assumes the cache is empty at the start of computation, and treats each procedure call separately, unrolling a great part of the control flow.},
\s{aiT}'s ability to produce tight estimates of execution costs would still enhance the effectiveness of the CerCo compiler, \eg by integrating such techniques into its development.
A typical case where cache analysis yields a difference in the execution cost of a block is in loops: the first iteration will usually incur more cache misses than subsequent iterations.

If one looks closely, the source of the weakness of the labelling approach as presented in~\cite{D2.1} is common to both points: the inability to state different costs for different occurrences of labels, where the difference might originate from labels being duplicated along the compilation chain, or from costs being sensitive to the current state of execution.
The preliminary work we present here addresses this weakness by introducing cost labels that depend on which iterations of their containing loops they occur in.
This is achieved by means of \emph{indexed labels}; all cost labels are decorated with formal indices coming from the loops containing such labels.
These indices allow us to rebuild, even after multiple loop transformations, which iterations of the original loops in the source code a particular label occurrence belongs to.
During the annotation stage of the source code, this information is presented to the user by means of \emph{dependent costs}.

We concentrate on integrating the labelling approach with two loop transformations.
For general information on compiler optimisations (and loop optimisations in particular) we refer the reader to the vast literature on the subject (\eg \cite{muchnick,morgan}).

\paragraph{Loop peeling}
As already mentioned, loop peeling consists in preceding the loop with a copy of its body, appropriately guarded.
This is used, in general, to trigger further optimisations, such as those that rely on execution information which can be computed at compile time, but which is erased by further iterations of the loop, or those that use the hoisted code to be more effective at eliminating redundant code.
Integrating this transformation into the labelling approach would also allow us to integrate the common case of cache analysis explained above; the analysis of cache hits and misses usually benefits from a form of \emph{virtual} loop peeling~\cite{cacheprediction}.

\paragraph{Loop unrolling}
This optimisation consists of the repetition of several copies of the body of the loop inside the loop itself (inserting appropriate guards, or avoiding them altogether if enough information about the loop's guard is available at compile time).
This can limit the number of (conditional or unconditional) jumps executed by the code and trigger further optimisations dealing with pipelining, if appropriate for the architecture.
\\\\
Whilst we cover only two loop optimisations in this report, we argue that the work presented herein poses a good foundation for extending the labelling approach, in order to cover more and more common optimisations, as well as gaining insight into how to integrate advanced cost estimation techniques, such as cache analysis, into the CerCo compiler.
Moreover, loop peeling itself has the fortuitous property of enhancing and enabling other optimisations.
Experimentation with CerCo's untrusted prototype compiler, which implements constant propagation and partial redundancy elimination~\cite{PRE,muchnick}, shows how loop peeling enhances those other optimisations.

\paragraph{Outline}
We will present our approach on a minimal `toy' imperative language, \imp{} with \s{goto}s, which we present in Section~\ref{sec:defimp} along with formal definitions of the loop transformations.
This language already presents most of the difficulties encountered when dealing with \textsc{c}, so we stick to it for the sake of this presentation.
In Section~\ref{sec:labelling} we summarise the labelling approach as presented in~\cite{D2.1}.
Section~\ref{sec:indexedlabels} presents \emph{indexed labels}, our proposal for dependent labels which are able to describe precise costs even in the presence of the various loop transformations we consider.
Finally, Section~\ref{sec:conc} goes into more detail regarding the implementation of indexed labels in CerCo's untrusted compiler and speculates on further work on the subject.

\section{\imp{} with goto}\label{sec:defimp}
We briefly outline the toy language, \imp{} with \s{goto}s.
The language was designed in order to pose problems for the existing labelling approach, and as a testing ground for our new notion of indexed labels.

The syntax and operational semantics of our toy language are presented in \autoref{fig:minimp}.
Note that we could augment the language further, with \s{break} and \s{continue}, at no further expense.
\begin{figureplr}
$$\begin{gathered}
\begin{array}{nlBl>(R<)n}
\multicolumn{4}{C}{\bfseries Syntax}\\
\multicolumn{4}{ncn}{
  \ell,\ldots \hfill \text{(labels)} \hfill x,y,\ldots \hfill
\text{(identifiers)}
\hfill e,f,\ldots \hfill \text{(expressions)}
}\\
P,S,T,\ldots &\gramm& \s{skip} \mid s;t
\mid \sop{if}e\sbin{then}s\sbin{else}t
\mid \sop{while} e \sbin{do} s \mid
  x \ass e
\\&\mid&
\ell : s \mid \sop{goto}\ell& \spanr{-2}{statements}\\
\\
\multicolumn{4}{C}{\bfseries Semantics}\\
K,\ldots  &\gramm& \s{halt} \mid S \cdot K & continuations
\end{array}
\\[15pt]
\s{find}(\ell,S,K) \ass
\left\{\begin{array}{lL}
\bot & if $S=\s{skip},\sop{goto} \ell'$ or $x\ass e$,\\
(T, K) & if $S=\ell:T$,\\
\s{find}(\ell,T,K) & otherwise, if $S = \ell':T$,\\
\s{find}(\ell,T_1,T_2\cdot K) & if defined and $S=T_1;T_2$,\\
\s{find}(\ell,T_1,K) & if defined and
$S=\sop{if}b\sbin{then}T_1\sbin{else}T_2$,\\
\s{find}(\ell,T_2,K) & otherwise, if $S=T_1;T_2$ or
$\sop{if}b\sbin{then}T_1\sbin{else}T_2$,\\
\s{find}(\ell,T,S\cdot K) & if $S = \sop{while}b\sbin{do}T$.
\end{array}\right.
\\[15pt]
\begin{array}{lBl}
(x\ass e,K,s)  &\to_P& (\s{skip},K,s[v/x]) \qquad\mbox{if }(e,s)\eval v \\ \\

(S;T,K,s)  &\to_P& (S,T\cdot K,s) \\ \\

(\s{if} \ b \ \s{then} \ S \ \s{else} \ T,K,s)
&\to_P&\left\{
\begin{array}{ll}
(S,K,s) &\mbox{if }(b,s)\eval v \neq 0 \\
(T,K,s) &\mbox{if }(b,s)\eval 0
\end{array}
\right. \\ \\


(\s{while} \ b \ \s{do} \ S ,K,s)
&\to_P&\left\{
\begin{array}{ll}
(S,\s{while} \ b \ \s{do} \ S \cdot K,s) &\mbox{if }(b,s)\eval v \neq 0 \\
(\s{skip},K,s) &\mbox{if }(b,s)\eval 0
\end{array}
\right. \\ \\


(\s{skip},S\cdot K,s)  &\to_P&(S,K,s) \\ \\

(\ell : S, K, s)  &\to_P& (S,K,s) \\ \\

(\sop{goto}\ell,K,s)  &\to_P& (\s{find}(\ell,P,\s{halt}),s) \\ \\
\end{array}
\end{gathered}$$
\caption{The syntax and operational semantics of \imp.}
\label{fig:minimp}
\end{figureplr}
The precise grammar for expressions is not particularly relevant, so we do not give it in full.
For the sake of conciseness we also treat boolean and arithmetic expressions together (with the usual \textsc{c} convention of an expression being true iff non-zero).
We may omit the \s{else} clause of a conditional if it leads to a \s{skip} statement.

We will presuppose that all programs are \emph{well-labelled}, \ie every label labels at most one occurrence of a statement in a program, and every \s{goto} points to a label actually present in the program.
The \s{find} helper function has the task of not only finding the labelled statement in the program, but also of building the correct continuation.
The continuation built by \s{find} replaces the current continuation in the case of a jump.

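For concreteness, the following is a minimal OCaml sketch (our own, not taken from the CerCo sources) of the abstract syntax, the continuations and the \s{find} helper defined above; expressions are kept to the bare minimum needed in later sketches.

\begin{verbatim}
type label = string

type expr =                          (* expressions, kept minimal *)
  | Var of string
  | Const of int
  | Add of expr * expr
  | Lt of expr * expr

type stmt =
  | Skip
  | Seq      of stmt * stmt          (* s;t                  *)
  | If       of expr * stmt * stmt   (* if e then s else t   *)
  | While    of expr * stmt          (* while e do s         *)
  | Assign   of string * expr        (* x := e               *)
  | Labelled of label * stmt         (* l : s                *)
  | Goto     of label                (* goto l               *)

type cont = Halt | Push of stmt * cont   (* halt  |  S . K   *)

(* find l s k: locate the statement labelled l in s, rebuilding the
   continuation along the way; None plays the role of the bottom
   value in the defining clauses above. *)
let rec find l s k =
  match s with
  | Skip | Assign _ | Goto _ -> None
  | Labelled (l', t) -> if l = l' then Some (t, k) else find l t k
  | Seq (t1, t2) ->
      (match find l t1 (Push (t2, k)) with
       | Some r -> Some r
       | None   -> find l t2 k)
  | If (_, t1, t2) ->
      (match find l t1 k with
       | Some r -> Some r
       | None   -> find l t2 k)
  | While (_, t) -> find l t (Push (s, k))
\end{verbatim}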
\paragraph{Further down the compilation chain}
We abstract over the rest of the compilation chain.
For every language $L$ further down the compilation chain, we posit the existence of a suitable notion of `sequential instructions', wherein each instruction has a single natural successor, to which we can add our own.

\subsection{Loop transformations}
We call a loop $L$ \emph{single-entry} in $P$ if there is no \s{goto} in $P$ outside of $L$ which jumps into $L$.\footnote{This is a reasonable approximation: it defines a loop as multi-entry if it has an external but unreachable \s{goto} jumping into it.}
Many loop optimisations do not preserve the semantics of multi-entry loops in general, or are otherwise rendered ineffective.
Usually compilers implement single-entry loop detection which prevents multi-entry loops from being targeted by optimisations~\cite{muchnick,morgan}.
The loop transformations we present are local, \ie they target a single loop and transform it.
Which loops are targeted may be decided by some \emph{ad hoc} heuristic.
However, the precise details of which loops are targeted, and how, are not important here.

\paragraph{Loop peeling}
$$
\sop{while}b\sbin{do}S \mapsto \sop{if}b\sbin{then} S; \sop{while} b \sbin{do} S[\ell'_i/\ell_i]
$$
where $\ell'_i$ is a fresh label for any $\ell_i$ labelling a statement in $S$.
This relabelling is safe for \s{goto}s occurring outside the loop because of the single-entry condition.
Note that any \s{break} and \s{continue} statements in the peeled body $S$ should be replaced with \s{goto}s.
\paragraph{Loop unrolling}
$$
\sop{while}b\sbin{do}S\mapsto
\sop{while} b \sbin{do} (S ;
  \sop{if} b \sbin{then} (S[\ell^1_i/\ell_i] ;
  \cdots
  \sop{if} b \sbin{then} S[\ell^n_i/\ell_i]) \cdots)
$$
where $\ell^j_i$ are again fresh labels for any $\ell_i$ labelling a statement in $S$.
This is a wilfully na\"{i}ve version of loop unrolling, which usually targets less general loops.
The problems this transformation poses to CerCo's labelling approach are independent of the sophistication of the actual transformation.

\begin{example}
In \autoref{fig:example1} we show a program (a wilfully inefficient computation of the
sum of the first $n$ factorials) and a possible transformation of it, combining loop
peeling and loop unrolling.
\begin{figureplr}
$$
\fbox{$\begin{array}{l}
s\ass 0;\\
i\ass 0;\\
\sop{while}i<n\sbin{do}\\
\inde p\ass 1;\\
\inde j\ass 1;\\
\inde \sop{while}j \le i\sbin{do}\\
\inde \inde p\ass j*p\\
\inde \inde j\ass j+1;\\
\inde s\ass s+p;\\
\inde i\ass i+1;\\
\end{array}
$}
\mapsto
\fbox{$\begin{array}{l}
s\ass 0;\\
i\ass 0;\\
\tikztargetm{a}{\s{if}}\ i<n\sbin{then}\\
\inde p\ass 1;\\
\inde j\ass 1;\\
\inde \sop{while}j \le i\sbin{do}\\
\inde \inde p\ass j*p\\
\inde \inde j\ass j+1;\\
\inde s\ass s+p;\\
\inde i\ass i+1;\\
\inde \tikztargetm{d}{\s{while}}\ i<n\sbin{do}\\
\inde \inde p\ass 1;\\
\inde \inde j\ass 1;\\
\inde \inde \tikztargetm{b}{\s{if}}\ j \le i\sbin{then}\\
\inde \inde \inde p\ass j*p\\
\inde \inde \inde j\ass j+1;\\
\inde \inde \inde \sop{if}j \le i\sbin{then}\\
\inde \inde \inde \inde p\ass j*p\\
\inde \inde \inde \inde j\ass j+1;\\
\inde \inde \inde \inde \s{while}\ j \le i\sbin{do}\\
\inde \inde \inde \inde \inde p\ass j*p\\
\inde \inde \inde \inde \inde j\ass j+1;\\
\inde \inde \inde \inde \inde \sop{if}j \le i\sbin{then}\\
\inde \inde \inde \inde \inde \inde p\ass j*p\\
\inde \inde \inde \inde \inde \inde \tikztargetm{c}j\ass j+1;\\
\inde \inde s\ass s+p;\\
\inde \inde i\ass i+1;\\
\inde \inde \sop{if}i<n\sbin{then}\\
\inde \inde \inde p\ass 1;\\
\inde \inde \inde j\ass 1;\\
\inde \inde \inde \tikztargetm{e}{\s{while}}\ j < i\sbin{do}\\
\inde \inde \inde \inde p\ass j*p\\
\inde \inde \inde \inde j\ass j+1;\\
\inde \inde \inde \inde \s{if}\ j < i\sbin{then}\\
\inde \inde \inde \inde \inde p\ass j*p\\
\inde \inde \inde \inde \inde \tikztargetm{f}j\ass j+1;\\
\inde \inde \inde s\ass s+p;\\
\inde \inde \inde i\ass i+1\tikztargetm{g};{}
\end{array}
$}\tikztargetm{right}{}
\begin{tikzpicture}[overlay, remember picture, thick,
brace/.style = {decorate, decoration={brace, amplitude = 15pt}},
label/.style = {sloped, anchor = base, yshift = 17pt, font = \large}]
\draw [brace, transform canvas={xshift=5pt}] (b.north-|right) -- node[label]{peeled} (c.south-|right);
\draw [brace, transform canvas={xshift=30pt}] (b.north-|right) -- node[label]{unrolled} (c.south-|right);
\draw [brace, transform canvas={xshift=5pt}] (e.north-|right) -- node[label]{unrolled} (f.south-|right);
\draw [brace, transform canvas={xshift=55pt}] (d.north-|right) -- node[label]{unrolled} (g.south-|right);
\draw [brace, transform canvas={xshift=80pt}] (a.north-|right) -- node[label]{peeled} (g.south-|right);
\end{tikzpicture}
\hspace{85pt}{}
$$
\caption{An example of loop transformations done on an \imp{} program. Parentheses are omitted in favour of
blocks by indentation.}
\label{fig:example1}
\end{figureplr}
\end{example}

\section{Labelling: a quick sketch of the previous approach}
\label{sec:labelling}
Plainly labelled \imp{} is obtained by adding to the code \emph{cost labels} (with metavariables $\alpha,\beta,\ldots$), and cost-labelled statements:
$$
S,T\gramm \cdots \mid \alpha: S
$$
Cost labels allow us to track some program points along the compilation chain.
For further details we refer to~\cite{D2.1}.

With labels, the small-step semantics turns into a labelled transition system, and a natural notion of trace (\ie lists of labels) arises.
The evaluation of statements is enriched with traces, so that rules follow a pattern similar to the following:
$$
\begin{array}{lblL}
(\alpha: S, K,s) &\to[\alpha]_P (S,K,s)\\
(\s{skip}, S \cdot K,s) &\to[\varepsilon]_P (S, K, s)\\
& \text{etc.}
\end{array}$$
Here, we identify cost labels $\alpha$ with singleton traces and we use $\varepsilon$ for the empty trace.
Cost labels are emitted by cost-labelled statements only\footnote{In the general case the evaluation of expressions can emit cost labels too (see~\ref{sec:conc}).}.
We then write $\to[\lambda]\!\!^*$ for the transitive closure of the small-step semantics which produces by concatenation the trace $\lambda$.

\paragraph{Labelling}
Given an \imp{} program $P$, its \emph{labelling} $\alpha:\Ell(P)$ in $\ell\imp$ is defined by putting cost labels after every branching statement, at the start of both branches, and a cost label at the beginning of the program. Also, every labelled statement gets a cost label;
this is a conservative approach to ensuring that all loops have labels inside them, as a loop might be constructed with \s{goto}s.
The relevant cases are
$$\begin{aligned}
  \Ell(\sop{if}e\sbin{then}S\sbin{else}T) &=
    \sop{if}e\sbin{then}\alpha:\Ell(S)\sbin{else}\beta:\Ell(T)\\
  \Ell(\sop{while}e\sbin{do}S) &=
    (\sop{while}e\sbin{do}\alpha:\Ell(S));\beta:\s{skip}\\
  \Ell(\ell : S) &=
    (\ell : \alpha : \Ell(S))
  \end{aligned}$$
where $\alpha,\beta$ are fresh cost labels.
In all other cases the definition just passes to substatements.
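
As an illustration, here is a minimal OCaml sketch of this labelling pass on the syntax given earlier (again ours, not CerCo's): statements are extended with a cost-label constructor, and fresh labels come from a simple counter.

\begin{verbatim}
(* Labelled statements: the grammar above extended with a : S. *)
type cost_label = string

type lstmt =
  | LSkip
  | LSeq      of lstmt * lstmt
  | LIf       of expr * lstmt * lstmt
  | LWhile    of expr * lstmt
  | LAssign   of string * expr
  | LLabelled of label * lstmt
  | LGoto     of label
  | LCost     of cost_label * lstmt        (* a : S *)

let counter = ref 0
let fresh () = incr counter; Printf.sprintf "_alpha%d" !counter

(* The three relevant cases of L; the rest is structural recursion. *)
let rec lab : stmt -> lstmt = function
  | If (e, s, t) ->
      LIf (e, LCost (fresh (), lab s), LCost (fresh (), lab t))
  | While (e, s) ->
      LSeq (LWhile (e, LCost (fresh (), lab s)),
            LCost (fresh (), LSkip))
  | Labelled (l, s) -> LLabelled (l, LCost (fresh (), lab s))
  | Seq (s, t)      -> LSeq (lab s, lab t)
  | Assign (x, e)   -> LAssign (x, e)
  | Goto l          -> LGoto l
  | Skip            -> LSkip

(* The labelling of a whole program adds a fresh label at the start. *)
let labelling (p : stmt) : lstmt = LCost (fresh (), lab p)
\end{verbatim}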

\paragraph{Labels in the rest of the compilation chain}
All languages further down the chain get a new sequential statement $\sop{emit}\alpha$ whose effect is to be consumed in a labelled transition while keeping the same state.
All other instructions keep their usual operational semantics and do not emit cost labels.

Preservation of semantics throughout the compilation process is restated, in rough terms, as:
$$
\text{starting state of $P$}\to[\lambda]\!\!^*\;\text{halting state} \iff
\text{starting state of $\mathcal C(P)$} \to[\lambda]\!\!^*\;\text{halting state}
$$
Here $P$ is a program of a language along the compilation chain, starting and halting states depend on the language, and $\mathcal C$ is the compilation function\footnote{The case of divergent computations needs to be addressed too.
Also, the requirement can be weakened by demanding some weaker form of equivalence of the traces than equality.
Both of these issues are beyond the scope of this presentation.}.

\paragraph{Instrumentations}
Let $\mathcal C$ be the whole compilation from $\ell\imp$ to the labelled version of some low-level language $L$.
Supposing such compilation has not introduced any new loop or branching, we have that:
\begin{itemize}
\item
Every loop contains at least one cost label (\emph{soundness condition});
\item
Every branching has different labels for the two branches (\emph{preciseness condition}).
\end{itemize}
With these two conditions, we have that each and every cost label in $\mathcal C(P)$ for any $P$ corresponds to a block of sequential instructions, to which we can assign a constant \emph{cost}\footnote{This in fact requires the machine architecture to be `simple enough', or for some form of execution analysis to take place.}.
We may therefore assume the existence of a \emph{cost mapping} $\kappa_P$ from cost labels to natural numbers, assigning to each cost label $\alpha$ the cost of the block containing the single occurrence of $\alpha$.

Given any cost mapping $\kappa$, we can enrich a labelled program so that a particular fresh variable (the \emph{cost variable} $c$) keeps track of the running sum of costs during execution.
We call this procedure \emph{instrumentation} of the program, and it is defined recursively by:
$$
\mathcal I(\alpha:S) = c \ass c + \kappa(\alpha) ; \mathcal I(S)
$$
In all other cases the definition passes to substatements.
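
On the labelled syntax sketched above, instrumentation can be rendered as follows (the cost variable name \codeex{\_cost} is our own choice):

\begin{verbatim}
(* Instrumentation: replace every cost label a by the update
   c := c + kappa(a), leaving everything else untouched. *)
let rec instrument (kappa : cost_label -> int) : lstmt -> lstmt =
  function
  | LCost (a, s) ->
      LSeq (LAssign ("_cost", Add (Var "_cost", Const (kappa a))),
            instrument kappa s)
  | LSeq (s, t)      -> LSeq (instrument kappa s, instrument kappa t)
  | LIf (e, s, t)    -> LIf (e, instrument kappa s, instrument kappa t)
  | LWhile (e, s)    -> LWhile (e, instrument kappa s)
  | LLabelled (l, s) -> LLabelled (l, instrument kappa s)
  | (LSkip | LAssign _ | LGoto _) as s -> s
\end{verbatim}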

\paragraph{The problem with loop optimisations}
Let us take loop peeling, and apply it to the labelling of a program without any prior adjustment:
$$
(\sop{while}b\sbin{do}\alpha:S);\beta:\s{skip}
\mapsto
(\sop{if}b\sbin{then} \alpha : S; \sop{while} b \sbin{do} \alpha :
S[\ell'_i/\ell_i]);
\beta:\s{skip}
$$
What happens is that the cost label $\alpha$ is duplicated into two distinct occurrences.
If these two occurrences correspond to different costs in the compiled code, the best the cost mapping can do is to take the maximum of the two, preserving soundness (\ie the cost estimate still bounds the actual one) but losing preciseness (\ie the actual cost could be strictly less than its estimate).

\section{Indexed labels}
\label{sec:indexedlabels}
This section presents the core of the new approach.
In brief, it amounts to the following points:
\begin{enumerate}[\bfseries~\ref*{sec:indexedlabels}.1.]
\item
\label{en:outline1}
Enrich cost labels with formal indices corresponding, at the beginning of the process, to the iterations of the loops they belong to.
\item
\label{en:outline2}
Each time a loop transformation is applied and a cost label is split into different occurrences, each of these is reindexed, so that whenever it is emitted its position in the original loop can be reconstructed.
\item
\label{en:outline3}
Along the compilation chain, alongside the \s{emit} instruction we add other instructions updating the indices, so that iterations of the original loops can be rebuilt at the level of the operational semantics.
\item
\label{en:outline4}
The machinery computing the cost mapping will still work, but will assign costs to indexed cost labels rather than to plain cost labels as we would wish.
However, \emph{dependent costs} can be calculated, where the dependency is on which iteration of the containing loops we are in.
\end{enumerate}

\subsection{Indexing the cost labels}
\label{ssec:indlabs}

\paragraph{Formal indices and $\iota\ell\imp$}
Let $i_0,i_1,\ldots$ be a sequence of distinguished fresh identifiers that will be used as loop indices.
A \emph{simple expression} is an affine arithmetical expression in one of these indices, that is $a*i_k+b$ with $a,b,k \in \mathbb N$.
Simple expressions $e_1=a_1*i_k+b_1$, $e_2=a_2*i_k+b_2$ in the same index can be composed, yielding $e_1\circ e_2\ass (a_1a_2)*i_k + (a_1b_2+b_1)$, and this operation has an identity element in $id_k \ass 1*i_k+0$.
Constants can be expressed as simple expressions, so that we identify a natural $c$ with $0*i_k+c$.
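A sketch of simple expressions and their composition, in the same OCaml style as before (the representation is ours):

\begin{verbatim}
(* A simple expression a*i_k + b; the index k is kept externally,
   e.g. by the position in an indexing (see below). *)
type simple = { a : int; b : int }

let id_k    = { a = 1; b = 0 }     (* the identity 1*i_k + 0   *)
let const c = { a = 0; b = c }     (* a natural c as 0*i_k + c *)

(* e1 o e2: substitute e2 for i_k in e1, giving
   (a1*a2)*i_k + (a1*b2 + b1). *)
let compose e1 e2 = { a = e1.a * e2.a; b = e1.a * e2.b + e1.b }
\end{verbatim}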

An \emph{indexing} (with metavariables $I$, $J$, \ldots) is a list of transformations of successive formal indices dictated by simple expressions, that is a mapping\footnote{Here we restrict each mapping to be a simple expression \emph{on the same index}.
This might not be the case if more loop optimisations are accounted for (for example, interchanging two nested loops).}
$$
i_0\mapsto a_0*i_0+b_0,\dots, i_{k-1} \mapsto a_{k-1}*i_{k-1}+b_{k-1}
$$

An \emph{indexed cost label} (metavariables $\alphab$, $\betab$, \ldots) is the combination of a cost label $\alpha$ and an indexing $I$, written $\alpha\la I\ra$.
The cost label underlying an indexed one is called its \emph{atom}.
All plain labels can be considered as indexed ones by taking an empty indexing.

\imp{} with indexed labels ($\iota\ell\imp$) is defined by adding to $\imp$ statements with indexed labels, and by having loops with formal indices attached to them:
$$
S,T,\ldots \gramm \cdots \mid i_k:\sop{while}e\sbin{do}S\mid \alphab : S
$$
Note that unindexed loops still exist in the language: they will correspond to multi-entry loops, which are ignored by indexing and optimisations.
We will discuss the semantics later.

\paragraph{Indexed labelling}
Given an $\imp$ program $P$, in order to index loops and assign indexed labels, we must first distinguish single-entry loops.
We sketch how this can be computed in the sequel.

A first pass over the program $P$ can easily compute two maps: $\s{loopof}_P$ from each label $\ell$ to the occurrence (\ie the path in the syntax tree) of a $\s{while}$ loop containing $\ell$, or the empty path if none exists; and $\s{gotosof}_P$ from a label $\ell$ to the occurrences of \s{goto}s pointing to it.
Then the set $\s{multientry}_P$ of multi-entry loops of $P$ can be computed by
$$
\s{multientry}_P\ass\{\, p \mid \exists \ell,q.\ p =\s{loopof}_P(\ell),\ q\in\s{gotosof}_P(\ell),\ q \not\le p\,\}
$$
Here $\le$ is the prefix relation\footnote{Possible simplifications to this procedure include keeping track of just the while loops containing labels and \s{goto}s (rather than paths in the syntactic tree of the program), and making two passes while avoiding building the map to sets $\s{gotosof}$.}.
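
This computation can be sketched as follows, assuming paths are lists of child positions in the syntax tree and that the two maps have already been built (all names are ours):

\begin{verbatim}
type path = int list            (* paths in the syntax tree *)

let rec is_prefix p q =         (* p <= q, the prefix relation *)
  match p, q with
  | [], _ -> true
  | x :: p', y :: q' -> x = y && is_prefix p' q'
  | _, [] -> false

(* multientry: loops (identified by their path) reached by a goto
   lying outside of them. [loopof l] is None when l occurs in no
   loop; [gotosof l] lists the paths of the gotos pointing to l. *)
let multientry
    ~(labels : label list)
    ~(loopof : label -> path option)
    ~(gotosof : label -> path list) : path list =
  List.filter_map
    (fun l ->
      match loopof l with
      | Some p when List.exists (fun q -> not (is_prefix p q))
                      (gotosof l) -> Some p
      | _ -> None)
    labels
\end{verbatim}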

Let $Id_k$ be the indexing of length $k$ made from identity simple expressions, \ie the sequence $i_0\mapsto id_0, \ldots , i_{k-1}\mapsto id_{k-1}$.
We define the tiered indexed labelling $\Ell^\iota_P (S,k)$ in program $P$, for an occurrence $S$ of a statement in $P$ and a natural $k$, by recursion, setting:
$$
\Ell^\iota_P(S,k)\ass
\left\{
\begin{array}{lh{-100pt}l}
 (i_k:\sop{while}b\sbin{do}\alpha\la Id_{k+1}\ra : \Ell^\iota_P(T,k+1));\beta\la Id_k \ra : \s{skip}
\\& \text{if $S=\sop{while}b\sbin{do}T$ and $S\notin \s{multientry}_P$,}\\[3pt]
(\sop{while}b\sbin{do}\alpha\la Id_k \ra : \Ell^\iota_P(T,k));\beta\la Id_k \ra : \s{skip}
\\& \text{otherwise, if $S=\sop{while}b\sbin{do}T$,}\\[3pt]
\sop{if}b\sbin{then} \alpha\la Id_k \ra : \Ell^\iota_P(T_1,k) \sbin{else} \beta\la Id_k \ra : \Ell^\iota_P(T_2,k)
\\&\text{if $S=\sop{if}b\sbin{then}T_1\sbin{else}T_2$,}\\[3pt]
\ell:\alpha\la Id_k\ra : \Ell_P^\iota(T,k) & \text{if $S = \ell : T$,}\\[3pt]
\ldots
\end{array}
\right.
$$
Here, as usual, $\alpha$ and $\beta$ are fresh cost labels, and the other cases just make the recursive calls on the substatements.
The \emph{indexed labelling} of a program $P$ is then defined as $\alpha\la \ra : \Ell^\iota_P(P,0)$, \ie a further fresh unindexed cost label is added at the start, and we start from level $0$.

In plainer words: each single-entry loop is indexed by $i_k$, where $k$ is the number of other single-entry loops containing this one, and all cost labels under the scope of a single-entry loop indexed by $i_k$ are indexed by all indices $i_0,\ldots,i_k$, without any transformation.

\subsection{Indexed labels and loop transformations}\label{ssec:looptrans}
We define the \emph{reindexing} $I \circ (i_k\mapsto a*i_k+b)$ as an operator on indexings by setting:
\begin{multline*}
(i_0\mapsto e_0,\ldots, i_k \mapsto e_k,\ldots,i_n\mapsto e_n)
\circ(i_k\mapsto a*i_k+b)
\ass\\
i_0\mapsto e_0,\ldots, i_k \mapsto e_k \circ(a*i_k+b),\ldots,i_n\mapsto e_n.
\end{multline*}
We further extend this to indexed labels (by $\alpha\la I\ra\circ(i_k\mapsto e)\ass \alpha\la I\circ (i_k\mapsto e)\ra$) and also to statements in $\iota\ell\imp$ (by applying the above transformation to all indexed labels).
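
With indexings represented as lists of simple expressions (the $j$-th element mapping $i_j$), reindexing is a one-liner; this sketch reuses the \codeex{simple} type and \codeex{compose} from above.

\begin{verbatim}
type indexing = simple list   (* position j holds the image of i_j *)

(* I o (i_k -> e): compose e into the k-th component only. *)
let reindex (i : indexing) (k : int) (e : simple) : indexing =
  List.mapi (fun j ej -> if j = k then compose ej e else ej) i
\end{verbatim}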

We can then redefine loop peeling and loop unrolling, taking into account indexed labels.
It will only be possible to apply the transformations to indexed loops, that is loops that are single-entry.
The attentive reader will notice that no assumptions are made on the labelling of the statements that are involved.
In particular the transformations can be repeated and composed at will.
Also, note that after erasing all labelling information (\ie indexed cost labels and loop indices) we recover exactly the same transformations presented in Section~\ref{sec:defimp}.

\paragraph{Indexed loop peeling}
$$
i_k:\sop{while}b\sbin{do}S\mapsto
\sop{if}b\sbin{then} S\circ (i_k\mapsto 0); i_k : \sop{while} b \sbin{do} S[\ell'_i/\ell_i]\circ(i_k\mapsto i_k + 1)
$$
As can be expected, the peeled iteration of the loop gets reindexed as being always the first iteration of the loop, while the iterations of the remaining loop are shifted by $1$. Note that this transformation can lower the actual depth of some loops; however, their indices are left untouched.
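For instance, on a loop whose body carries a single identity-indexed label (and, for simplicity, no \s{goto} labels in $T$), indexed peeling gives
$$
i_0:\sop{while}b\sbin{do}\alpha\la i_0\mapsto i_0\ra : T
\mapsto
\sop{if}b\sbin{then}\alpha\la i_0\mapsto 0\ra : T;\
i_0:\sop{while}b\sbin{do}\alpha\la i_0\mapsto i_0+1\ra : T
$$
so that the two occurrences of $\alpha$ record, respectively, iteration $0$ and iterations $1,2,\ldots$ of the original loop.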

\paragraph{Indexed loop unrolling}
$$
\begin{array}{l}
\begin{array}{ncn}
i_k:\sop{while}b\sbin{do}S\\
\tikz\node[rotate=-90,inner sep=0pt]{$\mapsto$};
\end{array}\\
i_k:\sop{while} b \sbin{do}\\
\quad (S\circ(i_k\mapsto n*i_k) ;\\
\quad \sop{if} b \sbin{then}\\
\quad\quad (S[\ell^1_i/\ell_i]\circ(i_k\mapsto n*i_k+1) ;\\
\quad\quad\quad \vdots \\
\quad\quad \quad \sop{if} b \sbin{then}\\
\quad \quad \quad \quad S[\ell^n_i/\ell_i]\circ(i_k\mapsto n*i_k+n-1)
)\cdots )
\end{array}
$$
Again, the reindexing is as expected: each copy of the unrolled body has its indices remapped so that, when they are executed, the original iteration of the loop to which they correspond can be recovered.

\subsection{Semantics and compilation of indexed labels}
In order to make sense of loop indices, one must keep track of their values in the state.
A \emph{constant indexing} (metavariables $C,\ldots$) is an indexing which employs only constant simple expressions.
The evaluation of an indexing $I$ in a constant indexing $C$, noted $I|_C$, is defined by:
$$
I|_{(i_0\mapsto c_0,\ldots, i_{k-1}\mapsto c_{k-1})} \ass I\circ(i_0\mapsto c_0)\circ\cdots\circ(i_{k-1}\mapsto c_{k-1})
$$
Here, we are using the definition of ${-}\circ{-}$ given in~\ref{ssec:indlabs}.
We consider the above defined only if the resulting indexing is a constant one too\footnote{For example $(i_0\mapsto 2*i_0,i_1\mapsto i_1+1)|_{i_0\mapsto 2}$ is undefined, but $(i_0\mapsto 2*i_0,i_1\mapsto 0)|_{i_0\mapsto 2}= i_0\mapsto 4,i_1\mapsto 0$ is indeed a constant indexing, even if the domain of the original indexing is not covered by the constant one.}.
The definition is extended to indexed labels by $\alpha\la I\ra|_C\ass \alpha\la I|_C\ra$.

Constant indexings will be used to keep track of the exact iterations of the original code that the emitted labels belong to.
We thus define two basic actions to update constant indexings: $C[i_k{\uparrow}]$ increments the value of $i_k$ by one, and $C[i_k{\downarrow}0]$ resets it to $0$.
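
In the representation used so far, constant indexings and their updates can be sketched thus (evaluation assumes $C$ covers all the indices of $I$):

\begin{verbatim}
(* A constant indexing: the current value of each i_k. *)
type const_indexing = int array

let up c k   = c.(k) <- c.(k) + 1    (* C[i_k up]     *)
let down c k = c.(k) <- 0            (* C[i_k down 0] *)

(* I|_C: compose each component with the constant c_k, which
   yields the constant a_k * c_k + b_k. *)
let eval (c : const_indexing) (i : indexing) : int list =
  List.mapi (fun k e -> e.a * c.(k) + e.b) i
\end{verbatim}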

We are ready to update the definition of the operational semantics of indexed labelled \imp.
The emitted cost labels will now be ones indexed by constant indexings.
We add a special indexed loop construct for continuations that keeps track of active indexed loop indices:
$$
K,\ldots  \gramm \cdots | i_k:\sop{while} b \sbin {do} S \sbin{then}  K
$$
The difference between the regular stack concatenation $i_k:\sop{while}b\sbin{do}S\cdot K$ and the new constructor is that the latter indicates the loop is the active one in which we already are, while the former is a loop that still needs to be started\footnote{In the presence of \s{continue} and \s{break} statements active loops need to be kept track of in any case.}.
The \s{find} function is updated accordingly with the case
$$
\s{find}(\ell, i_k:\sop{while}b\sbin{do}S, K) \ass \s{find}(\ell, S, i_k: \sop{while}b\sbin{do}S\sbin{then}K)
$$
The state will now be a 4-tuple $(S,K,s,C)$ which adds a constant indexing to the triple of the regular semantics.
The small-step rules for all statements remain the same, without touching the $C$ parameter (in particular unindexed loops behave the same as usual), apart from the ones regarding cost labels and indexed loops.
The remaining cases are:
$$\begin{aligned}
   (\alphab : S,K,s,C) &\to[\alphab|_C]_P (S,K,s,C)\\
   (i_k:\sop{while}b\sbin{do}S,K,s,C) &\to[\varepsilon]_P
    \begin{cases}
     (S,i_k:\sop{while}b\sbin{do}S\sbin{then} K,s,C[i_k{\downarrow}0])
     \\\hskip 125pt \text{if $(b,s)\eval v\neq 0$,}\\
     \rlap{(\s{skip}, K, s, C)}\hskip 125pt \text{otherwise}
    \end{cases}\\
   (\s{skip}, i_k:\sop{while}b\sbin{do}S\sbin{then}K,s,C) &\to[\varepsilon]_P
    \begin{cases}
     (S,i_k:\sop{while}b\sbin{do}S\sbin{then} K,s,C[i_k{\uparrow}])
      \\\hskip 125pt \text{if $(b,s)\eval v\neq 0$,}\\
     \rlap{(\s{skip}, K, s, C)} \hskip 125pt \text{otherwise}
    \end{cases}
  \end{aligned}$$
Some explanations are in order:
\begin{itemize}
\item
Emitting a label always instantiates it with the current indexing.
\item
Hitting an indexed loop the first time initializes the corresponding index to 0; continuing the same loop increments the index as expected.
\item
The \s{find} function ignores the current indexing: this is correct under the assumption that all indexed loops are single-entry, so that when we land inside an indexed loop with a \s{goto}, we are sure that its current index is right.
\item
The starting state with store $s$ for a program $P$ is $(P,\s{halt},s,(i_0\mapsto 0,\dots,i_{n-1}\mapsto 0))$ where $i_0,\ldots,i_{n-1}$ cover all loop indices of $P$\footnote{For a program which is the indexed labelling of an \imp{} one this corresponds to the maximum nesting of single-entry loops.
We can also avoid computing this value in advance if we define $C[i{\downarrow}0]$ to extend $C$'s domain as needed, so that the starting constant indexing can be the empty one.}.
\end{itemize}
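
For concreteness, here is a sketch of the two indexed-loop rules, the only ones besides label emission that touch $C$; stores, guard evaluation and the remaining statement and continuation forms are left abstract or elided.
\begin{verbatim}
type bexp                                     (* guards, left abstract *)
type store                                    (* stores, left abstract *)
type stmt = Skip | IWhile of int * bexp * stmt  (* other cases elided *)
type cont =                                   (* continuations *)
  | Halt
  | KWhileThen of int * bexp * stmt * cont    (* i_k: while b do S then K *)

type cindex = int list
let update k f = List.mapi (fun j v -> if j = k then f v else v)

(* one step of (S, K, s, C); eval_b is the assumed guard evaluation *)
let step (eval_b : bexp -> store -> int) (st, k, s, c) =
  match st, k with
  | IWhile (ik, b, body), _ when eval_b b s <> 0 ->
      (* first entrance of the loop: i_k is reset to 0 *)
      (body, KWhileThen (ik, b, body, k), s, update ik (fun _ -> 0) c)
  | IWhile _, _ ->
      (Skip, k, s, c)
  | Skip, KWhileThen (ik, b, _, _) when eval_b b s <> 0 ->
      (* next iteration of the active loop: i_k is incremented *)
      (match k with
       | KWhileThen (_, _, body, _) -> (body, k, s, update ik (fun v -> v + 1) c)
       | _ -> assert false)
  | Skip, KWhileThen (_, _, _, k') ->
      (Skip, k', s, c)
  | _ -> (st, k, s, c)                        (* remaining rules elided *)
\end{verbatim}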

\paragraph{Compilation}
Further down the compilation chain the loop structure is usually partially or completely lost.
We cannot rely on it anymore to keep track of the original source code iterations.
We therefore add, alongside the \s{emit} instruction, two other sequential instructions $\sop{ind_reset}k$ and $\sop{ind_inc}k$ whose sole effect is to reset to 0 (resp.\ increment by 1) the loop index $i_k$, as kept track of in a constant indexing accompanying the state.

The first step of compilation from $\iota\ell\imp$ consists of prefixing the translation of an indexed loop $i_k:\s{while}\ b\ \s{do}\ S$ with $\sop{ind_reset}k$ and postfixing the translation of its body $S$ with $\sop{ind_inc}k$.
Later in the compilation chain we must propagate the instructions dealing with cost labels.
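
A sketch of this first step, over a hypothetical target fragment carrying the two new instructions, could be the following.
\begin{verbatim}
type bexp
type stmt  = Skip | Seq of stmt * stmt | IWhile of int * bexp * stmt
  (* source: only the cases relevant here *)
type tstmt =                                   (* assumed target fragment *)
  | TSkip
  | TSeq of tstmt * tstmt
  | TWhile of bexp * tstmt
  | TIndReset of int                           (* ind_reset k *)
  | TIndInc of int                             (* ind_inc k   *)

(* i_k : while b do S ~> ind_reset k ; while b do (compile S ; ind_inc k) *)
let rec compile = function
  | Skip -> TSkip
  | Seq (s1, s2) -> TSeq (compile s1, compile s2)
  | IWhile (k, b, s) ->
      TSeq (TIndReset k, TWhile (b, TSeq (compile s, TIndInc k)))
\end{verbatim}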

We would like to stress the fact that this machinery is only needed to give a suitable semantics of observables on which preservation proofs can be done.
By no means are the added instructions and the constant indexing in the state meant to change the actual (let us say denotational) semantics of the programs.
In this regard the two new instructions have a similar role to the \s{emit} one.
A forgetful mapping of everything (syntax, states, operational semantics rules) can be defined erasing all occurrences of cost labels and loop indices, and the result will always be a regular version of the language considered.

\paragraph{Stating the preservation of semantics}
In fact, the statement of preservation of semantics does not change at all, except that we now consider traces of evaluated indexed cost labels rather than traces of plain ones.

\subsection{Dependent costs in the source code}
\label{ssec:depcosts}
The task of producing dependent costs from constant costs induced by indexed labels is quite technical.
Before presenting it here, we would like to point out that the annotations produced by the procedure described in this subsection, even if correct, can be enormous and unreadable.
In Section~\ref{sec:conc}, where we detail the actual implementation, we will also sketch how we mitigated this problem.

Given the result of compiling the indexed labelling $\Ell^\iota(P)$ of an \imp{} program $P$, we may still suppose that a cost mapping can be computed, but one from indexed labels to naturals.
We want to annotate the source code, so we need a way to express and compute the costs of cost labels, \ie to group the costs of indexed labels into the costs of their atoms.
In order to do so we introduce \emph{dependent costs}.
Let us suppose that, for the sole purpose of annotation, we have available in the language ternary expressions of the form
$$\tern e {f_1}{f_2},$$
and that we have access to common operators on integers such as equality, order and modulus.

\paragraph{Simple conditions}

First, we need to shift from \emph{transformations} of loop indices to \emph{conditions} on them.
We identify a set of conditions on natural numbers which are able to express the image of any composition of simple expressions.
\emph{Simple conditions} are of three possible forms:
\begin{itemize}
\item
Equality $i_k=n$ for some natural $n$.
\item
Inequality $i_k\ge n$ for some natural $n$.
\item
Modular equality together with inequality $i_k\bmod a = b\wedge i_k\ge n$ for naturals $a, b, n$.
\end{itemize}
The `always true' simple condition is given by $i_k\ge 0$.
We write $i_k\bmod a = b$ as a shorthand for the simple condition $i_k\bmod a = b\wedge i_k\ge 0$.

Given a simple condition $p$ and a constant indexing $C$ we can easily define when $p$ holds for $C$ (written $p\circ C$).
A \emph{dependent cost expression} is an expression built solely out of integer constants and ternary expressions with simple conditions at their head.
Given a dependent cost expression $e$ where all of the loop indices appearing in it are in the domain of a constant indexing $C$, we can define the value $e\circ C\in \mathbb N$ by:
$$n\circ C\ass n,\qquad (\tern p e f)\circ C\ass
\begin{cases}
  e\circ C& \text{if $p\circ C$,}\\
  f\circ C& \text{otherwise.}
\end{cases}$$
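
A possible OCaml rendering of simple conditions, dependent cost expressions and the two evaluations $p\circ C$ and $e\circ C$ is the following sketch (the names are ours; $C$ is the list representation used earlier).
\begin{verbatim}
type scond =                       (* simple conditions on index i_k *)
  | Eq of int * int                (* Eq (k, n): i_k = n  *)
  | Ge of int * int                (* Ge (k, n): i_k >= n *)
  | ModGe of int * int * int * int (* ModGe (k, a, b, n):
                                      i_k mod a = b and i_k >= n *)

type dcost = Const of int | Tern of scond * dcost * dcost

(* p o C: does the simple condition hold for the constant indexing C? *)
let holds p (c : int list) =
  match p with
  | Eq (k, n) -> List.nth c k = n
  | Ge (k, n) -> List.nth c k >= n
  | ModGe (k, a, b, n) -> let v = List.nth c k in v mod a = b && v >= n

(* e o C: the value of a dependent cost expression under C *)
let rec eval_cost e c =
  match e with
  | Const n -> n
  | Tern (p, e1, e2) -> if holds p c then eval_cost e1 c else eval_cost e2 c
\end{verbatim}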

\paragraph{From indexed costs to dependent ones}
Every simple expression $e$ corresponds to a simple condition $p(e)$ which expresses the set of values that $e$ can take.
The definition of this correspondence follows.
We recall that in this development, loop indices are always mapped to simple expressions over the same index.
If this were not the case, the condition obtained from an expression would have to be on the mapped index, not on the indeterminate of the simple expression.
We leave all generalisations of what we present here for further work:
$$
p(a*i_k+b)\ass
\begin{cases}
i_k = b & \text{if $a = 0$,}\\
i_k \ge b & \text{if $a = 1$,}\\
i_k\bmod a = b' \wedge i_k \ge b & \text{otherwise, where $b' = b\bmod a$}.
\end{cases}
$$
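
The case split translates directly, reusing the \texttt{sexp} and \texttt{scond} representations of the sketches above; here $k$ is the position of the index the condition speaks about.
\begin{verbatim}
(* p(a*i_k + b): the simple condition describing the image of a*i_k + b *)
let cond_of_sexp k ({ a; b } : sexp) : scond =
  if a = 0 then Eq (k, b)
  else if a = 1 then Ge (k, b)
  else ModGe (k, a, b mod a, b)
\end{verbatim}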
Now, suppose we are given a mapping $\kappa$ from indexed labels to natural numbers.
We will transform it into a mapping (identified, by abuse of notation, with the same symbol $\kappa$) from atoms to \imp{} expressions built with ternary expressions which depend solely on loop indices.
To that end we define an auxiliary function $\kappa^\alpha_L$, parameterized by atoms and words of simple expressions, and defined on \emph{sets} of $n$-tuples of simple expressions (with $n$ constant across each such set, \ie each set is made of words all of the same length).

We will employ a bijection between words of simple expressions and indexings, given by:\footnote{Lists of simple expressions are in fact how indexings are represented in CerCo's current implementation of the compiler.}
$$
i_0\mapsto e_0,\ldots,i_{k-1}\mapsto e_{k-1} \cong e_0\cdots e_{k-1}.
$$
As usual, $\varepsilon$ denotes the empty word/indexing, and juxtaposition is used to denote word concatenation.

For every set $S$ of $n$-tuples of simple expressions, we are in one of the following three exclusive cases:
\begin{itemize}
\item
$S=\emptyset$.
\item
$S=\{\varepsilon\}$.
\item
There is a simple expression $e$ such that $S$ can be decomposed as $eS'+S''$, with $S'\neq \emptyset$ and none of the words in $S''$ starting with $e$.
\end{itemize}
Here $eS'$ denotes prepending $e$ to all elements of $S'$ and $+$ is disjoint union.
This classification can serve as the basis of a definition by recursion on $n+\card S$, where $n$ is the size of tuples in $S$ and $\card S$ is its cardinality.
Indeed in the third case the size of tuples in $S'$ decreases strictly (and cardinality does not increase), while for $S''$ the size of tuples remains the same but cardinality strictly decreases.
The expression $e$ of the third case will be chosen as minimal for some total order\footnote{The specific order used does not change the correctness of the procedure, but different orders can give more or less readable results. A ``good'' order is the lexicographic one, with $a*i_k+b \le a'*i_k+b'$ if $a<a'$, or $a=a'$ and $b\le b'$.}.

Following is the definition of the auxiliary function $\kappa^\alpha_L$, which follows the recursion scheme presented above:
$$
\begin{aligned}
\kappa^\alpha_L(\emptyset) &\ass 0\\
\kappa^\alpha_L(\{\varepsilon\}) &\ass \kappa(\alpha\la L\ra) \\
\kappa^\alpha_L(eS'+S'') &\ass \tern{p(e)}{\kappa^\alpha_{Le}(S')}{\kappa^\alpha_L(S'')}
\end{aligned}
$$
\noindent
Finally, the wanted dependent cost mapping is defined by
$$
\kappa(\alpha)\ass\kappa^\alpha_\varepsilon(\{\,L\mid \alpha\la L\ra \text{ appears in the compiled code}\,\})
$$
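
The recursion scheme fits in a few lines of OCaml (a sketch reusing the types and \texttt{cond\_of\_sexp} above; sets of words are assumed duplicate-free lists of equal-length \texttt{sexp} lists, \texttt{kappa} is the assumed cost mapping on indexed labels, and the polymorphic \texttt{min} happens to realise the lexicographic order of the footnote).
\begin{verbatim}
(* kappa_aux kappa alpha l s = kappa^alpha_L(S), l being L reversed *)
let rec kappa_aux (kappa : string * sexp list -> int)
                  alpha l (s : sexp list list) : dcost =
  match s with
  | [] -> Const 0                                 (* S = {}        *)
  | [ [] ] -> Const (kappa (alpha, List.rev l))   (* S = {epsilon} *)
  | _ ->
      (* decompose S as eS' + S'': e minimal among the first letters *)
      let e =
        List.fold_left (fun m w -> min m (List.hd w))
          (List.hd (List.hd s)) s
      in
      let s' =
        List.filter_map
          (fun w -> if List.hd w = e then Some (List.tl w) else None) s
      in
      let s'' = List.filter (fun w -> List.hd w <> e) s in
      let k = List.length l in          (* e constrains the index i_k *)
      Tern (cond_of_sexp k e,
            kappa_aux kappa alpha (e :: l) s',
            kappa_aux kappa alpha l s'')

(* the dependent cost of an atom, over all words L such that alpha<L>
   appears in the compiled code *)
let kappa_dep kappa alpha words = kappa_aux kappa alpha [] words
\end{verbatim}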

\paragraph{Indexed instrumentation}
The \emph{indexed instrumentation} generalises the instrumentation presented in~\ref{sec:labelling}.
We described above how cost atoms can be mapped to dependent costs.
The instrumentation must also insert code dealing with the loop indices.
As instrumentation is done on the code produced by the labelling phase, all cost labels are indexed by identity indexings.
The relevant cases of the recursive definition (supposing $c$ is the cost variable) are then:
$$
\begin{aligned}
\mathcal I^\iota(\alpha\la Id_k\ra:S) &= c\ass c + \kappa(\alpha);\mathcal I^\iota(S)\\
\mathcal I^\iota(i_k : \sop{while}b\sbin{do}S) &=
  i_k \ass 0; \sop{while}b\sbin{do}(\mathcal I^\iota (S); i_k \ass i_k + 1)
\end{aligned}
$$

\subsection{A detailed example}\label{ssec:detailedex}
Take the program in \autoref{fig:example1}. Its initial labelling will be:
$$\begin{array}{l}
\alpha\la\ra : s\ass 0;\\
i\ass 0;\\
i_0:\sop{while}i<n\sbin{do}\\
\inde \beta\la i_0\ra : p\ass 1;\\
\inde j\ass 1;\\
\inde i_1:\sop{while}j \le i\sbin{do}\\
\inde \inde \gamma\la i_0, i_1\ra : p\ass j*p\\
\inde \inde j\ass j+1;\\
\inde \delta\la i_0\ra : s\ass s+p;\\
\inde i\ass i+1;\\
\epsilon\la\ra:\s{skip}
\end{array}
$$
(a single \s{skip} after the $\delta$ label has been suppressed, and we are using the identification
between indexings and tuples of simple expressions explained in \autoref{ssec:depcosts}).
Supposing, for example, $n=3$,
the trace of the program will be
$$\alpha\la\ra\,\beta\la 0 \ra\, \delta\la 0\ra\,\beta\la 1\ra\,\gamma\la 1,0\ra\,
\delta\la 1\ra\,\beta\la 2\ra\,\gamma\la 2,0\ra\,\gamma\la 2, 1\ra\,\delta\la 2\ra\,
\epsilon\la\ra$$
Now let us apply the transformations to the program of \autoref{fig:example1}, with the additional
information detailed in \autoref{ssec:looptrans}. The result is shown in
\autoref{fig:example2}.
\begin{figureplr}
$$
\begin{array}{l}
\mbox{\color{blue}\boldmath$\alpha\la\ra $}:s\ass 0;\\
i\ass 0;\\
\tikztargetm{a}{\s{if}}\ i<n\sbin{then}\\
\inde \mbox{\color{blue}\boldmath$\beta\la0\ra $}:p\ass 1;\\
\inde j\ass 1;\\
\inde i_1:\sop{while}j \le i\sbin{do}\\
\inde \inde \mbox{\color{blue}\boldmath$\gamma\la 0, i_1\ra $}:p\ass j*p\\
\inde \inde j\ass j+1;\\
\inde \mbox{\color{blue}\boldmath$\delta\la 0\ra $}: s\ass s+p;\\
\inde i\ass i+1;\\
\inde i_0:\tikztargetm{d}{\s{while}}\ i<n\sbin{do}\\
\inde \inde \mbox{\color{blue}\boldmath$\beta\la 2*i_0+1\ra $}:p\ass 1;\\
\inde \inde j\ass 1;\\
\inde \inde \tikztargetm{b}{\s{if}}\ j \le i\sbin{then}\\
\inde \inde \inde \mbox{\color{blue}\boldmath$\gamma\la  2*i_0+1, 0\ra $}:p\ass j*p\\
\inde \inde \inde j\ass j+1;\\
\inde \inde \inde \sop{if}j \le i\sbin{then}\\
\inde \inde \inde \inde \mbox{\color{blue}\boldmath$\gamma\la  2*i_0+1, 1\ra $}: p\ass j*p\\
\inde \inde \inde \inde j\ass j+1;\\
\inde \inde \inde \inde i_1:\s{while}\ j \le i\sbin{do}\\
\inde \inde \inde \inde \inde \mbox{\color{blue}\boldmath$\gamma\la  2*i_0+1, 2*i_1 + 2 \ra $}:p\ass j*p\\
\inde \inde \inde \inde \inde j\ass j+1;\\
\inde \inde \inde \inde \inde \sop{if}j \le i\sbin{then}\\
\inde \inde \inde \inde \inde \inde \mbox{\color{blue}\boldmath$\gamma\la  2*i_0+1, 2*i_1 + 3\ra $}:p\ass j*p\\
\inde \inde \inde \inde \inde \inde \tikztargetm{c}j\ass j+1;\\
\inde \inde \mbox{\color{blue}\boldmath$\delta\la 2*i_0+1\ra$}: s\ass s+p;\\
\inde \inde i\ass i+1;\\
\inde \inde \sop{if}i<n\sbin{then}\\
\inde \inde \inde \mbox{\color{blue}\boldmath$\beta\la 2*i_0+2\ra $}:p\ass 1;\\
\inde \inde \inde j\ass 1;\\
\inde \inde \inde i_1:\tikztargetm{e}{\s{while}}\ j \le i\sbin{do}\\
\inde \inde \inde \inde \mbox{\color{blue}\boldmath$\gamma\la 2*i_0+2, 2*i_1\ra$}: p\ass j*p\\
\inde \inde \inde \inde j\ass j+1;\\
\inde \inde \inde \inde \s{if}\ j \le i\sbin{then}\\
\inde \inde \inde \inde \inde \mbox{\color{blue}\boldmath$\gamma\la2*i_0+2, 2*i_1+1\ra$}: p\ass j*p\\
\inde \inde \inde \inde \inde \tikztargetm{f}j\ass j+1;\\
\inde \inde \inde \mbox{\color{blue}\boldmath$\delta\la 2*i_0+2\ra $}: s\ass s+p;\\
\inde \inde \inde i\ass i+1\tikztargetm{g};{}\\
\mbox{\color{blue}\boldmath$\epsilon\la\ra $}:\s{skip}
\end{array}$$
\caption{The result of applying reindexing loop transformations on the
program in \autoref{fig:example1}.}\label{fig:example2}
\end{figureplr}
One can check that the transformed code leaves the same trace when executed.

Now let us compute the dependent cost of $\gamma$, supposing no other loop transformations
are done. Ordering its indexings we
have the following list:
\begin{equation}
\label{eq:inds} 
\begin{aligned}
  &0, i_1\\
  &2*i_0+1, 0\\
  &2*i_0+1, 1\\
  &2*i_0+1, 2*i_1+2\\
  &2*i_0+1, 2*i_1+3\\
  &2*i_0+2, 2*i_1\\
  &2*i_0+2, 2*i_1+1
  \end{aligned}
\end{equation}

The resulting dependent cost will then be
\def\indetern#1#2#3{\begin{tabular}[t]{nL}(#1)\mathrel ?{}\\\hskip 15pt #2:{}\\\hskip 15pt #3\end{tabular}}
\def\tern#1#2#3{(#1)\mathrel ? #2:#3}
\begin{equation}\label{eq:ex}
\kappa^\iota(\gamma)=
\indetern{i_0 = 0}
  {\tern{i_1\ge 0}a0}
  {\indetern{i_0\bmod 2 = 1 \wedge i_0\ge 1}
    {\indetern{i_1=0}
      b
      {\indetern{i_1 = 1}
        c
        {\indetern{i_1\bmod 2 = 0 \wedge i_1\ge 2}
          d
          {\tern{i_1\bmod 2 = 1 \wedge i_1\ge 3}e0}
        }
      }
    }
    {\indetern{i_0\bmod 2 = 0 \wedge i_0\ge 2}
      {\indetern{i_1 \bmod 2 = 0 \wedge i_1 \ge 0}
        f
        {\tern{i_1 \bmod 2 = 1 \wedge i_1 \ge 1}g0}
      }
      0
    }
  }
\end{equation}
We will see later on \autopageref{pag:continued} how such an expression can be simplified.
\section{Notes on the implementation and further work}
\label{sec:conc}
Implementing the indexed label approach in CerCo's untrusted OCaml prototype does not introduce many new challenges beyond what has already been presented for the toy language, \imp{} with \s{goto}s.
\s{Clight}, the \s{C} fragment source language of CerCo's compilation chain~\cite{D2.1}, has several more features, but few demand changes in the indexed labelled approach.

\paragraph{Indexed loops \emph{vs}. index update instructions}
In our presentation we have indexed loops in $\iota\ell\imp$, while we hinted that later languages in the compilation chain would have specific index update instructions.
In CerCo's actual compilation chain from \s{Clight} to 8051 assembly, indexed loops are only present in \s{Clight}, while from \s{Cminor} onward all languages have the same three cost-involving instructions: label emitting, index resetting and index incrementing.

\paragraph{Loop transformations in the front end}
We decided to implement the two loop transformations in the front end, namely in \s{Clight}.
This decision is due to user readability concerns: if costs are to be presented to the programmer, they should depend on structures written by the programmer himself.
If loop transformations were performed later, it would be harder to create a correspondence between loops in the control flow graph and actual loops written in the source code.
However, another solution would be to index loops in the source code and then use these indices later in the compilation chain to pinpoint explicit loops of the source code: loop indices can be used to preserve such information, just like cost labels.

\paragraph{Break and continue statements}
\s{Clight}'s loop flow control statements for breaking and continuing a loop are equivalent to appropriate \s{goto} statements.
The only difference is that we are assured that they cannot cause loops to be multi-entry, and that when a transformation such as loop peeling is complete, they need to be replaced by actual \s{goto}s (which happens further down the compilation chain anyway).

\paragraph{Function calls}
Every internal function definition has its own space of loop indices.
Executable semantics must thus take into account saving and resetting the constant indexing of current loops upon hitting a function call, and restoring it upon return of control.
A peculiarity is that this cannot be attached to the actions that save and restore frames: namely, in the case of tail calls the constant indexing needs to be saved whereas the frame does not.

\paragraph{Cost-labelled expressions}
In labelled \s{Clight}, expressions also get cost labels, due to the presence of ternary conditional expressions (and lazy logical operators, which get translated to ternary expressions too).
Adapting the indexed labelled approach to cost-labelled expressions does not pose any particular problems.

\paragraph{Simplification of dependent costs}
As previously mentioned, the na\"{i}ve application of the procedure described in~\ref{ssec:depcosts} produces unwieldy cost annotations.
In our implementation several transformations are used to simplify such complex dependent costs.

Disjunctions of simple conditions are closed under all logical operations, and it can be computed whether such a disjunction implies a simple condition or its negation.
This can be used to eliminate useless branches of dependent costs, to merge branches that share the same value, and possibly to simplify the third case of simple conditions.
Examples of the three transformations are respectively:
\begin{itemize}
\item $
\verb+(_i_0 == 0)?+x\verb+:(_i_0 >= 1)?+y\verb+:+z
\mapsto
\verb+(_i_0 == 0)?+x\verb+:+y,
$
\item $
c\texttt{?}x\verb+:(+d\texttt{?}x\texttt{:}y\verb+)+
\mapsto
\texttt{(}c\texttt{ || }d\texttt{)?}x\texttt{:}y,
$
\item \begin{tabular}[t]{np{\linewidth}n}
$\verb+(_i_0 == 0)?+x\verb+:(_i_0 % 2 == 0 && _i_0 >= 2)?+y\verb+:+z
\mapsto$ \\\hfill
$\verb+(_i_0 == 0)?+x\verb+:(_i_0 % 2 == 0)?+y\verb+:+z.
$\end{tabular}
\end{itemize}
The second transformation tends to accumulate disjunctions, to the detriment of readability.
A further transformation swaps the two branches of a ternary expression if the negation of the condition can be expressed with fewer clauses.
For example:
$$ \verb+(_i_0 % 3 == 0 || _i_0 % 3 == 1)?+x\verb+:+y \mapsto
\verb+(_i_0 % 3 == 2)?+y\verb+:+x.
$$
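As an illustration, the branch-merging and branch-swapping steps might look as follows (a simplified sketch of ours, not the plugin's actual code: head conditions are kept abstract as lists of clauses read disjunctively, and \texttt{negate} is an assumed partial negation, returning the clauses of the negation when it is expressible).
\begin{verbatim}
(* dependent costs whose head condition is a disjunction of clauses *)
type 'c dcost = Const of int | Tern of 'c list * 'c dcost * 'c dcost

(* c ? x : (d ? x : y)  ~>  (c || d) ? x : y, applied bottom-up *)
let rec merge = function
  | Const n -> Const n
  | Tern (c, x, rest) ->
      let x = merge x in
      (match merge rest with
       | Tern (d, x', y) when x' = x -> Tern (c @ d, x, y)
       | rest -> Tern (c, x, rest))

(* swap the branches when the negated condition needs fewer clauses *)
let swap (negate : 'c list -> 'c list option) = function
  | Tern (c, x, y) ->
      (match negate c with
       | Some c' when List.length c' < List.length c -> Tern (c', y, x)
       | _ -> Tern (c, x, y))
  | e -> e
\end{verbatim}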
Picking up again the example depicted in \autoref{ssec:detailedex}, \label{pag:continued}
we can see that the cost in \eqref{eq:ex} can be simplified to the following,
using some of the transformations described above:
$$
\kappa^\iota(\gamma)=
\indetern{i_0 = 0}
  a
  {\indetern{i_0\bmod 2 = 1}
    {\indetern{i_1=0}
      b
      {\indetern{i_1 = 1}
        c
        {\indetern{i_1\bmod 2 = 0}
          de
        }
      }
    }
    {\indetern{i_1 \bmod 2 = 0}
      fg
    }
  }
$$
One should keep in mind that the example was wilfully complicated; in practice,
the cost expressions produced rarely have more clauses
than the number of nested loops containing the annotation.
\paragraph{Updates to the Frama-C cost plugin}
CerCo's Frama-C~\cite{framac} cost plugin has been updated to take into account our new notion of dependent costs.
The Frama-C framework expands ternary expressions to branch statements, introducing temporaries along the way.
This makes the task of analyzing ternary cost expressions rather daunting.
It was deemed necessary to provide an option in the compiler to use actual branch statements for cost annotations rather than ternary expressions, so that at least Frama-C's use of temporaries in cost annotations could be avoided.
The cost analysis carried out by the plugin now takes into account such dependent costs.

The only limitation (which actually simplifies the code) is that, within a dependent cost, simple conditions with modulus on the same loop index must all use the same modulus.
This corresponds to a reasonable limitation on the number of times loop unrolling may be applied to the same loop: at most once.

\paragraph{Further work}
For the time being, indexed labels are only implemented in the untrusted OCaml compiler, and are not yet present in the Matita code.
Porting them should pose no significant problem.
Once ported, the task of proving properties about them in Matita can begin.

Because most of the executable operational semantics of the languages across the front end and the back end are oblivious to cost labels, it should be expected that the bulk of the semantic preservation proofs that still need to be done will not get any harder because of indexed labels.
The only trickier point that we foresee would be in the translation of \s{Clight} to \s{Cminor}, where we pass from structured indexed loops to atomic instructions on loop indices.

An invariant which should probably be proved and provably preserved along the compilation chain is the non-overlap of indexings for the same atom.
Then, supposing cost correctness for the unindexed approach, the indexed one will just need to amend the proof that
$$\forall C\text{ constant indexing}.\forall \alpha\la I\ra\text{ appearing in the compiled code}.
  \kappa(\alpha)\circ (I\circ C) = \kappa(\alpha\la I \ra).
$$
Here, $C$ represents a snapshot of loop indices in the compiled code, while $I\circ C$ is the corresponding snapshot in the source code.
Semantics preservation will ensure that when, with snapshot $C$, we emit $\alpha\la I\ra$ (that is, we have $\alpha\la I\circ C\ra$ in the trace), $\alpha$ must also be emitted in the source code with indexing $I\circ C$, so the cost $\kappa(\alpha)\circ (I\circ C)$ applies.

Aside from carrying over the proofs, we would like to extend the approach to more loop transformations.
Important examples are loop inversion (where a for loop is reversed, usually to make iterations appear to be truly independent) and loop interchange (where two nested loops are swapped, usually to obtain more loop invariants or to enhance strength reduction).
This introduces interesting changes to the approach, where we would have indexings such as:
$$i_0\mapsto n - i_0\quad\text{or}\quad i_0\mapsto i_1, i_1\mapsto i_0.$$
In particular, dependency on actual variables of the code would enter the frame, as indexings would depend on the number of iterations of a well-behaving guarded loop (the $n$ in the first example).

Finally, as stated in the introduction, the approach should allow some integration of techniques for cache analysis, a possibility that for now has been put aside, as the standard 8051 target architecture for the CerCo project lacks a cache.
Two possible developments for this line of work present themselves:
\begin{enumerate}
\item
One could extend the development to some 8051 variants, of which some have been produced with a cache.
\item
One could make the compiler implement its own cache: this cannot apply to \textsc{ram} accesses of the standard 8051 architecture, as the difference in cost between accessing the two types of \textsc{ram} is only one clock cycle, which makes any implementation of a cache counterproductive.
For this proposal, we could either artificially change the cost of accessing \textsc{ram} in the model, just for the sake of possible future adaptations to other architectures, or model access to an external memory by means of the serial port.
\end{enumerate}


%
% \newpage
%
% \includepdf[pages={-}]{plugin.pdf}
%
%
% \newpage
%
% \includepdf[pages={-}]{fopara.pdf}
%
%
% \newpage
%
% \includepdf[pages={-}]{tlca.pdf}
%
% \bibliographystyle{plain}
\bibliography{bib}

\end{document}