1\documentclass{llncs}
2
3\usepackage{amsfonts}
4\usepackage{amsmath}
5\usepackage{amssymb} 
6\usepackage[english]{babel}
7\usepackage{color}
8\usepackage{fancybox}
9\usepackage{fancyvrb}
10\usepackage{graphicx}
11\usepackage[colorlinks,
12            bookmarks,bookmarksopen,bookmarksdepth=2]{hyperref}
13\usepackage{hyphenat}
14\usepackage[utf8x]{inputenc}
15\usepackage{listings}
16\usepackage{mdwlist}
17\usepackage{microtype}
18\usepackage{stmaryrd}
19\usepackage{url}
20
21\usepackage{tikz}
22\usetikzlibrary{positioning,calc,patterns,chains,shapes.geometric,scopes}
23
24%\renewcommand{\verb}{\lstinline}
25%\def\lstlanguagefiles{lst-grafite.tex}
26%\lstset{language=Grafite}
27\lstset{language=C}
28
29\newlength{\mylength}
30\newenvironment{frametxt}%
31        {\setlength{\fboxsep}{5pt}
32                \setlength{\mylength}{\linewidth}%
33                \addtolength{\mylength}{-2\fboxsep}%
34                \addtolength{\mylength}{-2\fboxrule}%
35                \Sbox
36                \minipage{\mylength}%
37                        \setlength{\abovedisplayskip}{0pt}%
38                        \setlength{\belowdisplayskip}{0pt}%
39                }%
40                {\endminipage\endSbox
41                        \[\fbox{\TheSbox}\]}
42
43\title{Certified Complexity (CerCo)\thanks{The project CerCo acknowledges the
44financial support of the Future and Emerging Technologies (FET) programme within
45the Seventh Framework Programme for Research of the European Commission, under
46FET-Open grant number: 243881}}
47\author{
48%Roberto
49R.~M. Amadio$^{4}$ \and
50%Nicolas
51N.~Ayache$^{3,4}$ \and
52%François
53F.~Bobot$^{3,4}$ \and
54%Jacob
55J.~P. Boender$^1$ \and
56%Brian
57B.~Campbell$^2$ \and
58%Ilias
59I.~Garnier$^2$ \and
60%Antoine
61A.~Madet$^4$ \and
62%James
63J.~McKinna$^2$ \and
64%Dominic
65D.~P. Mulligan$^1$ \and
66%Mauro
67M.~Piccolo$^1$ \and
68%Randy
69R.~Pollack$^2$ \and
70%Yann
71Y.~R\'egis-Gianas$^{3,4}$ \and
72%Claudio
73C.~Sacerdoti Coen$^1$ \and
74%Ian
75I.~Stark$^2$ \and
76%Paolo
77P.~Tranquilli$^1$}
\institute{Dipartimento di Informatica - Scienza e Ingegneria, Universit\`a di
Bologna \and
LFCS, School of Informatics, University of Edinburgh
\and INRIA (Team $\pi$r2)
\and
Universit\'e Paris Diderot
}
85
86\bibliographystyle{splncs03}
87
88\begin{document}
89
90\maketitle
91
92\begin{abstract}
93This paper provides an overview of the FET-Open Project CerCo
94(`Certified Complexity'). The project's main achievement is
95the development of a technique for performing static analyses of non-functional
96properties of programs (time, space) at the source level, without loss of accuracy
97and with a small, trusted code base. The main software component
98developed is a certified compiler producing cost annotations. The compiler
99translates source code to object code and produces an instrumented copy of the
100source code. This instrumentation exposes at
101the source level---and tracks precisely---the actual (non-asymptotic)
102computational cost of the input program. Untrusted invariant generators and trusted theorem provers
103are then used to compute and certify the parametric execution time of the
104code.
105\end{abstract}
106
107% ---------------------------------------------------------------------------- %
108% SECTION                                                                      %
109% ---------------------------------------------------------------------------- %
110\section{Introduction}
111
112\paragraph{Problem statement} Computer programs can be specified with both
113functional constraints (what a program must do) and non-functional constraints
114(e.g. what resources---time, space, etc.---the program may use).  In the current
115state of the art, functional properties are verified for high-level source code
116by combining user annotations (e.g. preconditions and invariants) with a
117multitude of automated analyses (invariant generators, type systems, abstract
118interpretation, theorem proving, and so on). By contrast, non-functional properties
119are generally checked on low-level object code, but also demand information
120about high-level functional behaviour that must somehow be recreated.
121
This situation presents several problems: 1) it can be hard to infer this
high-level structure in the presence of compiler optimisations; 2) techniques
that work on object code are not useful during early development, yet problems
detected later are more expensive to tackle; 3) parametric cost analysis is very
hard: how can we translate a cost that depends on the execution state (e.g. the
value of a register or a carry bit) into a cost that the user can understand by
looking at the source code? 4) performing functional analysis only on object code
makes it hard for the programmer to contribute, leading to less precise
estimates.
131
132\paragraph{Vision and approach} We want to reconcile functional and
133non-functional analyses: to share information and perform both at the same time
134on source code.
135%
136What has previously prevented this approach is the lack of a uniform and precise
137cost model for high-level code: 1) each statement occurrence is compiled
138differently and optimisations may change control flow; 2) the cost of an object
139code instruction may depend on the runtime state of hardware components like
pipelines and caches, none of which are visible in the source code.
141
142To solve the issue, we envision a new generation of compilers able to keep track
143of program structure through compilation and optimisation, and to exploit that
144information to define a cost model for source code that is precise, non-uniform,
145and accounts for runtime state. With such a source-level cost model we can
146reduce non-functional verification to the functional case and exploit the state
147of the art in automated high-level verification~\cite{survey}. The techniques
currently used by the Worst Case Execution Time (WCET) community, which performs
its analysis on object code, remain available but can now be coupled with an
additional source-level analysis. Where the approach produces precise cost models
that are too complex to reason about, safe approximations can be used to trade
precision for simplicity.
Finally, source code analysis can be performed during early development stages, when
153components have been specified but not implemented: source code modularity means
154that it is enough to specify the non-functional behaviour of unimplemented
155components.
156
157\paragraph{Contributions} We have developed what we refer to as \emph{the labeling approach} \cite{labeling}, a
158technique to implement compilers that induce cost models on source programs by
159very lightweight tracking of code changes through compilation. We have studied
160how to formally prove the correctness of compilers implementing this technique.
161We have implemented such a compiler from C to object binaries for the 8051
162micro-controller, and verified it in an interactive theorem prover. We have
163implemented a Frama-C plugin \cite{framac} that invokes the compiler on a source
164program and uses this to generate invariants on the high-level source that
165correctly model low-level costs. Finally, the plugin certifies that the program
166respects these costs by calling automated theorem provers, a new and innovative
167technique in the field of cost analysis. As a case study, we show how the
168plugin can automatically compute and certify the exact reaction time of Lustre
169\cite{lustre} data flow programs compiled into C.
170
171\section{Project context and objectives}
172Formal methods for verification of functional properties of programs have
173now reached a level of maturity and automation that is facilitating a slow but
174increasing adoption in production environments. For safety critical code, it is
175becoming commonplace to combine rigorous software engineering methodologies and testing
176with static analysis, taking the strong points of each approach and mitigating
177their weaknesses. Of particular interest are open frameworks
178for the combination of different formal methods, where the programs can be
179progressively specified and are continuously enriched with new safety
180guarantees: every method contributes knowledge (e.g. new invariants) that
181becomes an assumption for later analysis.
182
183The scenario for the verification of non-functional properties (time spent,
184memory used, energy consumed) is bleaker and the future seems to be getting even
185worse. Most industries verify that real time systems meet their deadlines
by simply performing many runs of the system and timing their execution, computing
the maximum time and adding an empirical safety margin, and claiming the result to
be a bound on the WCET of the program. Formal methods and software to statically
189analyse the WCET of programs exist, but they often produce bounds that are too
190pessimistic to be useful. Recent advancements in hardware architectures are all
191focused on the improvement of the average case performance, not the
192predictability of the worst case. Execution time is becoming increasingly
193dependent on execution history and the internal state of
194hardware components like pipelines and caches. Multi-core processors and non-uniform
195memory models are drastically reducing the possibility of performing
196static analysis in isolation, because programs are less and less time
197composable. Clock-precise hardware models are necessary for static analysis, and
198obtaining them is becoming harder as a consequence of the increased sophistication
199of hardware design.
200
201Despite the aforementioned problems, the need for reliable real time systems and programs is
202increasing, and there is increasing pressure from the research community towards
203the differentiation of hardware. The aim is to introduce alternative
204hardware with more predictable behaviour and hence more suitability for static
205analyses, for example, the decoupling of execution time from execution history
206by introducing randomisation \cite{proartis}.
207
208In the CerCo project \cite{cerco} we do not try to address this problem, optimistically
209assuming that static analysis of non-functional properties of programs will
210become feasible in the longer term. The main objective of our work is
211instead to bring together static analysis of functional and non-functional
212properties, which, according to the current state of the art, are completely
213independent activities with limited exchange of information: while the
214functional properties are verified on the source code, the analysis of
215non-functional properties is entirely performed on the object code to exploit
216clock-precise hardware models.
217
218Analysis currently takes place on object code for two main reasons.
219First, there is a lack of a uniform, precise cost model for source
220code instructions (or even basic blocks). During compilation, high level
221instructions are torn apart and reassembled in context-specific ways so that
associating a fragment of object code with a single high level instruction is
223infeasible. Even the control flow of the object and source code can be very
224different as a result of optimisations, for example aggressive loop
225optimisations may completely transform source level loops. Despite the lack of a uniform, compilation- and
226program-independent cost model on the source language, the literature on the
227analysis of non-asymptotic execution time on high level languages that assumes
228such a model is growing and gaining momentum. However, unless we can provide a
229replacement for such cost models, this literature's future practical impact looks
230to be minimal. Some hope has been provided by the EmBounded project \cite{embounded}, which
231compositionally compiles high-level code to a byte code that is executed by an
232emulator with guarantees on the maximal execution time spent for each byte code
instruction. The approach provides a uniform model at the price of the model's
precision (each cost is a pessimistic upper bound) and of the performance of the
executed code (because the byte code is emulated compositionally rather than
being compiled with a fully non-compositional compilation).
237%
238The second reason to perform the analysis on the object code is that bounding
239the worst case execution time of small code fragments in isolation (e.g. loop
240bodies) and then adding up the bounds yields very poor estimates because no
241knowledge on the hardware state can be assumed when executing the fragment. By
242analysing longer runs the bound obtained becomes more precise because the lack
243of knowledge on the initial state has less of an effect on longer computations.
244
245In CerCo we propose a radically new approach to the problem: we reject the idea
246of a uniform cost model and we propose that the compiler, which knows how the
247code is translated, must return the cost model for basic blocks of high level
instructions. It must do so by keeping track of control flow modifications, so
that they can be reversed, and by interfacing with static analysers.
250
251By embracing compilation, instead of avoiding it like EmBounded did, a CerCo
252compiler can at the same time produce efficient code and return costs that are
253as precise as the static analysis can be. Moreover, we allow our costs to be
254parametric: the cost of a block can depend on actual program data or on a
255summary of the execution history or on an approximated representation of the
256hardware state. For example, loop optimizations may assign to a loop body a cost
257that is a function of the number of iterations performed. As another example,
258the cost of a block may be a function of the vector of stalled pipeline states,
259which can be exposed in the source code and updated at each basic block exit. It
260is parametricity that allows one to analyse small code fragments without losing
261precision: in the analysis of the code fragment we do not have to ignore the
262initial hardware state. On the contrary, we can assume that we know exactly which
263state (or mode, as the WCET literature calls it) we are in.
264
265The cost of an execution is the sum of the cost of basic blocks multiplied by
266the number of times they are executed, which is a functional property of the
267program. Therefore, in order to perform (parametric) time analysis of programs,
268it is necessary to combine a cost model with control and data flow analysis.
269Current state of the art WCET technology
270\cite{stateart} performs the analysis on the object code, where the logic of the
271program is harder to reconstruct and most information available at the source
272code level has been lost. Imprecision in the analysis leads to useless bounds. To
273augment precision, the tools ask the user to provide constraints on the object
274code control flow, usually in the form of bounds on the number of iterations of
275loops or linear inequalities on them. This requires the user to manually link
the source and object code, translating their assumptions on the source code
277(which may be wrong) to object code constraints. The task is error prone and
278hard, especially in the presence of complex compiler optimisations.
279
280The CerCo approach has the potential to dramatically improve the state of the
281art. By performing control and data flow analyses on the source code, the error
282prone translation of invariants is completely avoided. It is in fact performed
283by the compiler itself when it induces the cost model on the source language.
284Moreover, any available technique for the verification of functional properties
285can be immediately reused and multiple techniques can collaborate together to
286infer and certify cost invariants for the program. Parametric cost analysis
becomes the default, with non-parametric bounds used as a last resort when
trading the complexity of the analysis against its precision. \emph{A priori}, no
289technique previously used in traditional WCET is lost: they can just be applied
290at the source code level.
291
292Traditional techniques for WCET that work on object code are also affected by
293another problem: they cannot be applied before the generation of the object
294code. Functional properties can be analysed in early development stages, while
295analysis of non-functional properties may come too late to avoid expensive
296changes to the program architecture. Our approach already works in early
297development stages by axiomatically attaching costs to unimplemented components.
298
All software used to verify properties of programs must be as bug-free as
possible. The trusted code base for verification consists of the code that must
be trusted in order to believe that the property holds. The trusted code base of
302state-of-the-art WCET tools is very large: one needs to trust the control flow
303analyser and the linear programming libraries it uses and also the formal models
304of the hardware. In CerCo we are moving the control flow analysis to the source
305code and we are introducing a non-standard compiler too. To reduce the trusted
code base, we implemented a prototype compiler and a static analyser in an interactive
307theorem prover, which was used to certify that the cost computed on the source
308code is indeed the one actually spent by the hardware. Formal models of the
309hardware and of the high level source languages were also implemented in the
310interactive theorem prover. Control flow analysis on the source code has been
311obtained using invariant generators, tools to produce proof obligations from
312generated invariants and automatic theorem provers to verify the obligations. If
313the automatic provers are able to generate proof traces that can be
314independently checked, the only remaining component that enters the trusted code
315base is an off-the-shelf invariant generator which, in turn, can be proved
correct using an interactive theorem prover. Therefore we achieve the double
objective of allowing the use of more off-the-shelf components (e.g. provers and
invariant generators) whilst at the same time reducing the trusted code base.
319
320%\paragraph{Summary of the CerCo objectives.} To summarize, the goal of CerCo is
321% to reconcile functional with non-functional analysis by performing them together
322% on the source code, sharing common knowledge about execution invariants. We want
323% to achieve the goal implementing a new generation of compilers that induce a
324% parametric, precise cost model for basic blocks on the source code. The compiler
325% should be certified using an interactive theorem prover to minimize the trusted
326% code base of the analysis. Once the cost model is induced, off-the-shelf tools
327% and techniques can be combined together to infer and prove parametric cost
328% bounds.
329%The long term benefits of the CerCo vision are expected to be:
330%1. the possibility to perform static analysis during early development stages
331%2.  parametric bounds made easier
332%3.  the application of off-the-shelf techniques currently unused for the
333% analysis of non-functional properties, like automated proving and type systems
334%4. simpler and safer interaction with the user, that is still asked for
335% knowledge, but on the source code, with the additional possibility of actually
336% verifying the provided knowledge
337%5. a reduced trusted code base
338%6. the increased accuracy of the bounds themselves.
339%
340%The long term success of the project is hindered by the increased complexity of
341% the static prediction of the non-functional behaviour of modern hardware. In the
342% time frame of the European contribution we have focused on the general
343% methodology and on the difficulties related to the development and certification
344% of a cost model inducing compiler.
345
346\section{The typical CerCo workflow}
347\begin{figure}[!t]
348\begin{tabular}{l@{\hspace{0.2cm}}|@{\hspace{0.2cm}}l}
349\begin{lstlisting}
350char a[] = {3, 2, 7, -4};
351char treshold = 4;
352
353int main() {
354  char j;
355  char *p = a;
356  int found = 0;
357  for (j=0; j < 4; j++) {
358    if (*p <= treshold)
359      { found++; }
360    p++;
361  }
362  return found;
363}
364\end{lstlisting}
365&
366%  $\vcenter{\includegraphics[width=7.5cm]{interaction_diagram.pdf}}$
367\begin{tikzpicture}[
368    baseline={([yshift=-.5ex]current bounding box)},
369    element/.style={draw, text width=1.6cm, on chain, text badly centered},
370    >=stealth
371    ]
372{[start chain=going below, node distance=.5cm]
373\node [element] (cerco) {CerCo\\compiler};
374\node [element] (cost)  {CerCo\\cost plugin};
375{[densely dashed]
376\node [element] (ded)   {Deductive\\platform};
377\node [element] (check) {Proof\\checker};
378}
379}
380\coordinate [left=3.5cm of cerco] (left);
381{[every node/.style={above, text width=3.5cm, text badly centered,
382                     font=\scriptsize}]
383\draw [<-] ([yshift=-1ex]cerco.north west) coordinate (t) --
384    node {C source}
385    (t-|left);
386\draw [->] (cerco) -- (cost);
387\draw [->] ([yshift=1ex]cerco.south west) coordinate (t) --
388    node {C source+cost annotations}
389    (t-|left) coordinate (cerco out);
390\draw [->] ([yshift=1ex]cost.south west) coordinate (t) --
391    node {C source+cost annotations\\+synthesized assertions}
392    (t-|left) coordinate (out);
393{[densely dashed]
394\draw [<-] ([yshift=-1ex]ded.north west) coordinate (t) --
395    node {C source+cost annotations\\+complexity assertions}
396    (t-|left) coordinate (ded in) .. controls +(-.5, 0) and +(-.5, 0) .. (out);
397\draw [->] ([yshift=1ex]ded.south west) coordinate (t) --
398    node {complexity obligations}
399    (t-|left) coordinate (out);
400\draw [<-] ([yshift=-1ex]check.north west) coordinate (t) --
401    node {complexity proof}
402    (t-|left) .. controls +(-.5, 0) and +(-.5, 0) .. (out);
403\draw [dash phase=2.5pt] (cerco out) .. controls +(-1, 0) and +(-1, 0) ..
404    (ded in);
405}}
406%% user:
407% head
408\draw (current bounding box.west) ++(-.2,.5)
409    circle (.2) ++(0,-.2) -- ++(0,-.1) coordinate (t);
410% arms
411\draw (t) -- +(-.3,-.2);
412\draw (t) -- +(.3,-.2);
413% body
414\draw (t) -- ++(0,-.4) coordinate (t);
415% legs
416\draw (t) -- +(-.2,-.6);
417\draw (t) -- +(.2,-.6);
418\end{tikzpicture}
419\end{tabular}
\caption{On the left: code to count the elements of the array that are less than
 or equal to the threshold. On the right: CerCo's interaction
 diagram. CerCo's components are drawn solid.}
423\label{test1}
424\end{figure}
425We illustrate the workflow we envisage (on the right of~\autoref{test1})
426on an example program (on the left of~\autoref{test1}).
427The user writes the program and feeds it to the CerCo compiler, which outputs
428an instrumented version of the same program that updates global variables
429that record the elapsed execution time and the stack space usage.
430The red lines in \autoref{itest1} are the instrumentation introduced by the
compiler. The annotated program can then be enriched with complexity assertions
in the style of Hoare logic, which are passed to a deductive platform (in our
case Frama-C). As a Frama-C cost plugin we provide a simple automatic
synthesiser for complexity assertions (the blue lines in \autoref{itest1}),
which can be overridden by the user to increase or decrease accuracy. From the
436assertions, a general purpose deductive platform produces proof obligations
437which in turn can be closed by automatic or interactive provers, ending in a
438proof certificate. Nine proof obligations are generated
439from~\autoref{itest1} (to prove that the loop invariant holds after
440one execution if it holds before, to prove that the whole program execution
441takes at most 1228 cycles, etc.). The CVC3 prover closes all obligations
442in a few seconds on routine commodity hardware.
443%
444\fvset{commandchars=\\\{\}}
445\lstset{morecomment=[s][\color{blue}]{/*@}{*/},
446        moredelim=[is][\color{blue}]{$}{$},
447        moredelim=[is][\color{red}]{|}{|}}
448\begin{figure}[!p]
449\begin{lstlisting}
450|int __cost = 33; int __stack = 5, __stack_max = 5;|
451
452|void __cost_incr(int incr) { __cost = __cost + incr; }|
453
454|void __stack_incr(int incr) {
455  __stack = __stack + incr;
456  __stack_max = __stack_max < __stack ? __stack : __stack_max;
457}|
458
459char a[4] = { 3, 2, 7, 252, };
460char treshold = 4;
461
462/*@ behaviour stack_cost:
463      ensures __stack_max <=
464              __max(\old(__stack_max), \old(__stack));
465      ensures __stack == \old(__stack);
466     
467    behaviour time_cost:
468      ensures __cost <= \old(__cost)+1228; */
469int main(void)
470{
471  char j;
472  char *p;
473  int found;
474  |__stack_incr(0); __cost_incr(91);|
475  p = a;
476  found = 0;
477  $__l: /* internal */$
478  /*@ for time_cost: loop invariant
479        __cost <=
480        \at(__cost,__l)+220*(__max(\at(5-j,__l), 0)-__max (5-j, 0));
481      for stack_cost: loop invariant
482        __stack_max == \at(__stack_max,__l);
483      for stack_cost: loop invariant
484        __stack == \at(__stack,__l);
485      loop variant 4-j; */
486  for (j = 0; j < 4; j++) {
487    |__cost_incr(76);|
488    if (*p <= treshold) {
489      |__cost_incr(144);|
490      found++;
491    } else {
492      |__cost_incr(122);|
493    }
494    p++;
495  }
496  |__cost_incr(37); __stack_incr(-0);|
497  return found;
498}
499\end{lstlisting}
500\caption{The instrumented version of the program in \autoref{test1},
501 with instrumentation added by the CerCo compiler in red and cost invariants
502 added by the CerCo Frama-C plugin in blue. The \texttt{\_\_cost},
503 \texttt{\_\_stack} and \texttt{\_\_stack\_max} variables hold the elapsed time
504in clock cycles and the current and maximum stack usage. Their initial values
505hold the clock cycles spent in initialising the global data before calling
506\texttt{main} and the space required by global data (and thus unavailable for
507the stack).
508}
509\label{itest1}
510\end{figure}
511\section{Main S\&T results of the CerCo project}
512We will now review the main S\&T results achieved in the CerCo project.
513
514% \emph{Dependent labeling~\cite{?}.} The basic labeling approach assumes a
515% bijective mapping between object code and source code O(1) blocks (called basic
516% blocks). This assumption is violated by many program optimizations (e.g. loop
517% peeling and loop unrolling). It also assumes the cost model computed on the
518% object code to be non parametric: every block must be assigned a cost that does
519% not depend on the state. This assumption is violated by stateful hardware like
520% pipelines, caches, branch prediction units. The dependent labeling approach is
521% an extension of the basic labeling approach that allows to handle parametric
522% cost models. We showed how the method allows to deal with loop optimizations and
523% pipelines, and we speculated about its applications to caches.
524%
525% \emph{Techniques to exploit the induced cost model.} Every technique used for
526% the analysis of functional properties of programs can be adapted to analyse the
527% non-functional properties of the source code instrumented by compilers that
528% implement the labeling approach. In order to gain confidence in this claim, we
529% showed how to implement a cost invariant generator combining abstract
530% interpretation with separation logic ideas \cite{separation}. We integrated
531% everything in the Frama-C modular architecture, in order to compute proof
532% obligations from functional and cost invariants and to use automatic theorem
533% provers on them. This is an example of a new technique that is not currently
534% exploited in WCET analysis. It also shows how precise functional invariants
535% benefits the non-functional analysis too. Finally, we show how to fully
536% automatically analyse the reaction time of Lustre nodes that are first compiled
537% to C using a standard Lustre compiler and then processed by a C compiler that
538% implements the labeling approach.
539
540% \emph{The CerCo compiler.} This is a compiler from a large subset of the C
541% program to 8051/8052 object code,
542% integrating the labeling approach and a static analyser for 8051 executables.
543% The latter can be implemented easily and does not require dependent costs
544% because the 8051 microprocessor is a very simple one, with constant-cost
545% instruction. It was chosen to separate the issue of exact propagation of the
546% cost model from the orthogonal problem of the static analysis of object code
547% that may require approximations or dependent costs. The compiler comes in
548% several versions: some are prototypes implemented directly in OCaml, and they
549% implement both the basic and dependent labeling approaches; the final version
550% is extracted from a Matita certification and at the moment implements only the
551% basic approach.
552
553\subsection{The (basic) labeling approach}
The labeling approach is the main technique that
underlies all the developments in CerCo. It allows us to track the evolution of
basic blocks during compilation in order to propagate the cost model from the
object code to the source code without losing precision in the process.
558\paragraph{Problem statement.} Given a source program $P$, we want to obtain an
559instrumented source program $P'$,  written in the same programming language, and
560the object code $O$ such that: 1) $P'$ is obtained by inserting into $P$ some
561additional instructions to update global cost information like the amount of
time spent during execution or the maximal stack space required; 2) $P$ and $P'$
must have the same functional behaviour, i.e.\ they must produce the same output
and intermediate observables; 3) $P$ and $O$ must have the same functional
behaviour; 4) after execution, and at interesting points during execution, the
cost information computed by $P'$ must be an upper bound on the cost actually spent
by $O$ to perform the corresponding operations (soundness property); 5) the difference
568between the costs computed by $P'$ and the execution costs of $O$ must be
569bounded by a program dependent constant (precision property).
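Schematically, writing $\mathit{cost}_{P'}(r)$ for the cost accumulated by the
instrumentation of $P'$ along a run $r$ and $\mathit{time}_O(r)$ for the time actually
spent by $O$ on the corresponding run (notation introduced here purely for
illustration), requirements 4 and 5 amount to
\[
\mathit{time}_O(r) \;\le\; \mathit{cost}_{P'}(r) \;\le\; \mathit{time}_O(r) + k
\]
for some constant $k$ that depends only on the program.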
570
571\paragraph{The labeling software components.} We solve the problem in four
572stages \cite{labeling}, implemented by four software components that are used
573in sequence.
574
575The first component labels the source program $P$ by injecting label emission
576statements in appropriate positions to mark the beginning of basic blocks.
577% The
578% set of labels with their positions is called labeling.
The syntax and semantics
of the source programming language are augmented with label emission statements.
581The statement ``EMIT $\ell$'' behaves like a NOP instruction that does not affect the
582program state or control flow, but its execution is observable.
583% Therefore the observables of a run of a program becomes a stream
584% of label emissions: $\ell_1,\ldots,\ell_n$, called the program trace. We clarify the conditions
585% that the labeling must respect later.
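As a purely illustrative sketch in this augmented syntax (the concrete form of the
emission statements and the label names are invented for this example and are not
CerCo output), the program of \autoref{test1} could be labelled as follows, with
one label per basic block, a label breaking the loop, and both branches of the
conditional starting with an emission (conditions made precise below):
\begin{lstlisting}
char a[] = {3, 2, 7, -4};
char treshold = 4;

int main() {
  char j;
  char *p = a;
  int found = 0;
  __emit(L_entry);          /* initial straight-line block            */
  for (j = 0; j < 4; j++) {
    __emit(L_body);         /* breaks the loop                        */
    if (*p <= treshold) {
      __emit(L_then);       /* each branch starts with an emission    */
      found++;
    } else {
      __emit(L_else);
    }
    p++;
  }
  __emit(L_exit);
  return found;
}
\end{lstlisting}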
586
587The second component is a labeling preserving compiler. It can be obtained from
588an existing compiler by adding label emission statements to every intermediate
589language and by propagating label emission statements during compilation. The
590compiler is correct if it preserves both the functional behaviour of the program
591and the traces of observables.
592% We may also ask that the function that erases the cost
593% emission statements commute with compilation. This optional property grants that
594% the labeling does not interfere with the original compiler behaviour. A further
595% set of requirements will be added later.
596
The third component is a static object code analyser. It takes as input a labelled
object code and computes the scope of each of its label emission statements,
599i.e.\ the tree of instructions that may be executed after the statement and before
600a new label emission is encountered.
601It is a tree and not a sequence because the scope
may contain a branching statement. To guarantee that such a finite tree
603exists, the object code must not contain any loop that is not broken by a label
604emission statement. This is the first requirement of a sound labeling. The
605analyser fails if the labeling is unsound. For each scope, the analyser
606computes an upper bound of the execution time required by the scope, using the
607maximum of the costs of the two branches in case of a conditional statement.
608Finally, the analyser computes the cost of a label by taking the maximum of the
609costs of the scopes of every statement that emits that label.
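The computation of the cost model can be pictured as a simple recursion over these
scope trees. The following C sketch is our own illustration (the data type and
function names are invented and do not reflect the actual analyser): it only
captures the use of the maximum at branching points and over the statements that
emit the same label.
\begin{lstlisting}
/* Hypothetical scope tree: each node is one object code instruction
   with its cost; a conditional has two continuations. */
typedef struct node {
  int cost;                /* cost of this instruction                  */
  struct node *next;       /* fall-through continuation, NULL at a leaf */
  struct node *branch;     /* taken branch, NULL if unconditional       */
} node;

static int max(int a, int b) { return a > b ? a : b; }

/* Upper bound on the time spent in a scope: sum along straight-line
   code, maximum over the two sides of a conditional. */
int scope_cost(node *n) {
  if (n == NULL) return 0;
  if (n->branch == NULL) return n->cost + scope_cost(n->next);
  return n->cost + max(scope_cost(n->next), scope_cost(n->branch));
}

/* Cost of a label: maximum over the scopes of the statements emitting it. */
int label_cost(node *scopes[], int n_scopes) {
  int c = 0;
  for (int i = 0; i < n_scopes; i++) c = max(c, scope_cost(scopes[i]));
  return c;
}
\end{lstlisting}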
610
The fourth and last component takes as input the cost model computed at step 3
and the labelled code computed at step 1. It outputs a source program obtained
by replacing each label emission statement with a statement that increments the
global cost variable by the cost that the cost model associates with the label.
The resulting source code is the instrumented source code.
616
617\paragraph{Correctness.} Requirements 1, 2 and 3 of the problem statement
618obviously hold, with 2 and 3 being a consequence of the definition of a correct
619labeling preserving compiler. It is also obvious that the value of the global
620cost variable of an instrumented source code is at any time equal to the sum of
621the costs of the labels emitted by the corresponding labelled code. Moreover,
622because the compiler preserves all traces, the sum of the costs of the labels
623emitted in the source and target labelled code are the same. Therefore, to
satisfy the fourth requirement, we need to guarantee that the time taken to execute
625the object code is equal to the sum of the costs of the labels emitted by the
626object code. We collect all the necessary conditions for this to happen in the
627definition of a sound labeling: a) all loops must be broken by a cost emission
628statement;  b) all program instructions must be in the scope of some cost
629emission statement. To satisfy also the fifth requirement, additional
630requirements must be imposed on the object code labeling to avoid all uses of
631the maximum in the cost computation: the labeling is precise if every label is
632emitted at most once and both branches of a conditional jump start with a label
633emission statement.
634
635The correctness and precision of the labeling approach only rely on the
636correctness and precision of the object code labeling. The simplest
637way to achieve them is to impose correctness and precision
638requirements also on the initial labeling produced by the first software
639component, and to ask the compiler to preserve these
640properties too. The latter requirement imposes serious limitations on the
641compilation strategy and optimizations: the compiler may not duplicate any code
642that contains label emission statements, like loop bodies. Therefore several
643loop optimizations like peeling or unrolling are prevented. Moreover, precision
644of the object code labeling is not sufficient \emph{per se} to obtain global
645precision: we also implicitly assumed the static analysis to be able to
associate a precise constant cost to every instruction. This is not possible in
the presence of stateful hardware whose state influences the cost of operations,
648like pipelines and caches. In the next subsection we will see an extension of the
649basic labeling approach to cover this situation.
650
651The labeling approach described in this section can be applied equally well and
652with minor modifications to imperative and functional languages
653\cite{labeling2}. We have tested it on a simple imperative language without
654functions (a While language), on a subset of C and on two compilation chains for
655a purely functional higher order language. The two main changes required to deal
656with functional languages are: 1) because global variables and updates are not
available, the instrumentation phase produces monadic code to ``update'' the
658global costs; 2) the requirements for a sound and precise labeling of the
659source code must be changed when the compilation is based on CPS translations.
660In particular, we need to introduce both labels emitted before a statement is
661executed and labels emitted after a statement is executed. The latter capture
662code that is inserted by the CPS translation and that would escape all label
663scopes.
664
665Phases 1, 2 and 3 can be applied as well to logic languages (e.g. Prolog).
666However, the instrumentation phase cannot: in standard Prolog there is no notion
667of (global) variable whose state is not retracted during backtracking.
Therefore, the cost of executing computations that are later backtracked would
not be counted correctly. Any extension of logic languages with
non-backtrackable state, however, should be able to support the labeling approach.
671
672\subsection{Dependent labeling}
673The core idea of the basic labeling approach is to establish a tight connection
674between basic blocks executed in the source and target languages. Once the
675connection is established, any cost model computed on the object code can be
676transferred to the source code, without affecting the code of the compiler or
677its proof. In particular, it is immediate that we can also transport cost models
678that associate to each label functions from hardware state to natural numbers.
679However, a problem arises during the instrumentation phase that replaces cost
680emission statements with increments of global cost variables. The global cost
681variable must be incremented with the result of applying the function associated
682to the label to the hardware state at the time of execution of the block.
683The hardware state comprises both the functional state that affects the
684computation (the value of the registers and memory) and the non-functional
685state that does not (the pipeline and cache contents for example). The former is
in correspondence with the source code state, but reconstructing the
correspondence may be hard, and lifting the cost model to work on the source code
state is likely to produce cost expressions that are too hard to reason about.
Fortunately, in all modern architectures the cost of executing single
instructions is either independent of the functional state or the jitter---the
difference between the worst and best case execution times---is small enough
to be bounded without losing too much precision. Therefore we can concentrate
on dependencies over the ``non-functional'' parts of the state only.
694
695The non-functional state has no correspondence in the high level state and does
696not influence the functional properties. What can be done is to expose the
non-functional state in the source code. Here we just present the basic
intuition in a simplified form: the technical details that allow us to handle the
general case are more complex and can be found in the CerCo deliverables. We add
700to the source code an additional global variable that represents the
701non-functional state and another one that remembers the last labels emitted. The
702state variable must be updated at every label emission statement, using an
703update function which is computed during static analysis too. The update
704function associates to each label a function from the recently emitted labels
and old state to the new state. It is computed by composing the semantics of every
706instruction in a basic block and restricting it to the non-functional part of
707the state.
708
Not all the details of the non-functional state need to be exposed, and the
710technique works better when the part of state that is required can be summarized
711in a simple data structure. For example, to handle simple but realistic
712pipelines it is sufficient to remember a short integer that encodes the position
713of bubbles (stuck instructions) in the pipeline. In any case, the user does not
714need to understand the meaning of the state to reason over the properties of the
715program. Moreover, at any moment the user or the invariant generator tools that
716analyse the instrumented source code produced by the compiler can decide to
trade precision of the analysis for simplicity by approximating the parametric
cost with safe non-parametric bounds. Interestingly, the functional analysis of
the code can determine which blocks are executed more frequently, in order to
approximate the less frequently executed ones more aggressively.
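
A minimal sketch of the idea (with entirely invented names and constants, and
ignoring for brevity the dependency on the recently emitted labels): the pipeline
summary is a small integer, and every label emission is replaced by an update of
both the cost and the state.
\begin{lstlisting}
int __cost = 0;
int __pipe = 0;   /* hypothetical summary of the pipeline state, e.g.
                     the position of bubbles encoded as a small integer */

/* For a label L1, static analysis would produce both a cost and a
   state update as functions of the previous state. */
void __emit_L1(void) {
  __cost += (__pipe == 0) ? 12 : 15;   /* a stalled pipeline costs more  */
  __pipe  = 1;                         /* state after executing block L1 */
}
\end{lstlisting}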
721
722Dependent labeling can also be applied to allow the compiler to duplicate
723blocks that contain labels (e.g. in loop optimizations) \cite{paolo}. The effect is to assign
724a different cost to the different occurrences of a duplicated label. For
725example, loop peeling turns a loop into the concatenation of a copy of the loop
726body (that executes the first iteration) with the conditional execution of the
727loop (for the successive iterations). Because of further optimizations, the two
728copies of the loop will be compiled differently, with the first body usually
729taking more time.
730
By introducing a variable that keeps track of the iteration number, we can
associate to the label a cost that is a function of the iteration number. The
same technique works for loop unrolling without modifications: the function will
assign one cost to the even iterations and another to the odd ones. The
actual work to be done consists of introducing into the source code, for each
loop, a variable that counts the number of iterations. The loop optimization code
that duplicates loop bodies must also modify the code so that the iteration
numbers are updated correctly. The technical details are more complex and
739can be found in the CerCo reports and publications. The implementation, however,
740is quite simple and the changes to a loop optimizing compiler are minimal.
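
For instance, if the compiler unrolls a loop two-fold, the instrumented source
could look as follows (a hand-written sketch with invented constants, not actual
compiler output): the source loop is unchanged, but the cost charged to its body
is a function of the parity of the iteration number.
\begin{lstlisting}
int __cost = 0;

/* Hypothetical dependent cost for the label of the loop body: even and
   odd iterations are compiled differently, hence different constants. */
void __cost_body(int __iter) {
  __cost += (__iter % 2 == 0) ? 76 : 69;
}

int sum(int *v, int n) {
  int s = 0;
  int __iter;
  for (__iter = 0; __iter < n; __iter++) {
    __cost_body(__iter);
    s += v[__iter];
  }
  return s;
}
\end{lstlisting}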
741
742\subsection{Techniques to exploit the induced cost model}
743We review the cost synthesis techniques developed in the project.
744The starting hypothesis is that we have a certified methodology to annotate
745blocks in the source code with constants which provide a sound and possibly
746precise upper bound on the cost of executing the blocks after compilation to
747object code.
748
749The principle that we have followed in designing the cost synthesis tools is
750that the synthetic bounds should be expressed and proved within a general
751purpose tool built to reason on the source code. In particular, we rely on the
752Frama-C tool to reason on C code and on the Coq proof-assistant to reason on
753higher-order functional programs.
754
755This principle entails that: 1)
756The inferred synthetic bounds are indeed correct as long as the general purpose
tool is. 2) There is no limitation on the class of programs that can be handled,
as long as the user is willing to carry out an interactive proof.
759
760Of course, automation is desirable whenever possible. Within this framework,
761automation means writing programs that give hints to the general purpose tool.
762These hints may take the form, say, of loop invariants/variants, of predicates
763describing the structure of the heap, or of types in a light logic. If these
764hints are correct and sufficiently precise the general purpose tool will produce
765a proof automatically, otherwise, user interaction is required.
766
767\paragraph{The Cost plugin and its application to the Lustre compiler.}
768Frama-C \cite{framac} is a set of analysers for C programs with a
769specification language called ACSL. New analyses can be dynamically added
770through a plugin system. For instance, the Jessie plugin allows deductive
771verification of C programs with respect to their specification in ACSL, with
772various provers as back-end tools.
773We developed the CerCo Cost plugin for the Frama-C platform as a proof of
774concept of an automatic environment exploiting the cost annotations produced by
the CerCo compiler. It consists of an OCaml program which, to a first approximation,
776takes the following actions: 1) it receives as input a C program, 2) it
777applies the CerCo compiler to produce a related C program with cost annotations,
7783) it applies some heuristics to produce a tentative bound on the cost of
779executing the C functions of the program as a function of the value of their
780parameters, 4) the user can then call the Jessie plugin to discharge the
781related proof obligations.
782In the following we elaborate on the soundness of the framework and the
783experiments we performed with the Cost tool on the C programs produced by a
784Lustre compiler.
785
786\paragraph{Soundness.} The soundness of the whole framework depends on the cost
787annotations added by the CerCo compiler, the synthetic costs produced by the
788Cost plugin, the verification conditions (VCs) generated by Jessie, and the
789external provers discharging the VCs. The synthetic costs being in ACSL format,
Jessie can be used to verify them. Thus, even if the added synthetic costs are
incorrect (relative to the cost annotations), the process as a whole is still
792correct: indeed, Jessie will not validate incorrect costs and no conclusion can
793be made about the WCET of the program in this case. In other terms, the
794soundness does not really depend on the action of the Cost plugin, which can in
795principle produce any synthetic cost. However, in order to be able to actually
796prove a WCET of a C function, we need to add correct annotations in a way that
797Jessie and subsequent automatic provers have enough information to deduce their
798validity. In practice this is not straightforward even for very simple programs
799composed of branching and assignments (no loops and no recursion) because a fine
800analysis of the VCs associated with branching may lead to a complexity blow up.
801\paragraph{Experience with Lustre.} Lustre is a data-flow language to program
802synchronous systems and the language comes with a compiler to C. We designed a
803wrapper for supporting Lustre files.
The C function produced by the compiler implements the step function of the
synchronous system, and computing the WCET of the function amounts to obtaining a
bound on the reaction time of the system. We tested the Cost plugin and the
Lustre wrapper on the C programs generated by the Lustre compiler. For programs
consisting of a few hundred lines of code, the Cost plugin computes a WCET and
Alt-Ergo is able to discharge all VCs automatically.
810
811\paragraph{Handling C programs with simple loops.}
The cost annotations added by the CerCo compiler take the form of C instructions
that update a fresh global variable, called the cost variable, by a constant.
Synthesizing a WCET of a C function thus consists in statically determining an
upper bound on the difference between the value of the cost variable before and
after the execution of the function, i.e.\ finding in the function the instructions
that update the cost variable and establishing the number of times they are passed
through during the flow of execution. In order to do the analysis the plugin
makes the following assumptions on the programs:
\begin{enumerate}
\item No recursive functions.
\item Every loop must be annotated with a variant. The variants of \texttt{for}
loops are automatically inferred.
\end{enumerate}

824The plugin proceeds as follows.
825First the call graph of the program is computed.
Then the cost of each function is computed by traversing its
control flow graph. If the function $f$ calls the function $g$
828then the function $g$ 
829is processed before the function $f$. The cost at a node is the maximum of the
830costs of the successors.
831In the case of a loop with a body that has a constant cost for every step of the
832loop, the cost is the product of the cost of the body and of the variant taken
833at the start of the loop.
834In the case of a loop with a body whose cost depends on the values of some free
835variables, a fresh logic function $f$ is introduced to represent the cost of the
836loop in the logic assertions. This logic function takes the variant as a first
837parameter. The other parameters of $f$ are the free variables of the body of the
loop. An axiom is added to account for the fact that the cost is accumulated at each
step of the loop.
The cost of the function is directly added as a post-condition of the function.
841
The user can influence the annotation by different means:
by using more precise variants, or
by annotating a function with a cost specification; the plugin will then use this
cost for the function instead of computing it.
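
As an illustration in the spirit of what the plugin generates (the names, the
constants and the exact shape of the axioms are ours and are only meant to convey
the idea; the annotations of the inner loop are omitted), consider a loop whose
body contains an inner loop over \texttt{m} elements: the accumulated cost can be
described by a logic function that takes the variant as its first parameter and
the free variable \texttt{m} as its second.
\begin{lstlisting}
extern int __cost;            /* declared by the CerCo instrumentation */
void __cost_incr(int incr);

/*@ axiomatic loop_cost {
      logic integer f(integer v, integer m);
      axiom f_zero: \forall integer m; f(0, m) == 0;
      axiom f_step: \forall integer v, m;
        v > 0 ==> f(v, m) == 50 + 30 * m + f(v - 1, m);
    } */

void scan(int n, int m) {
  int i, j;
  /*@ loop invariant
        __cost <= \at(__cost, Pre) + f(n, m) - f(n - i, m);
      loop variant n - i; */
  for (i = 0; i < n; i++) {
    __cost_incr(50);
    for (j = 0; j < m; j++)
      __cost_incr(30);
  }
}
\end{lstlisting}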
846\paragraph{C programs with pointers.}
847When it comes to verifying programs involving pointer-based data structures,
848such as linked lists, trees, or graphs, the use of traditional first-order logic
849to specify, and of SMT solvers to verify, shows some limitations. Separation
850logic \cite{separation} is then an elegant alternative. It is a program logic
with a new notion of conjunction to express spatial heap separation. Bobot has
recently introduced into the Jessie plugin automatically generated separation
853predicates to simulate separation logic reasoning in a traditional verification
854framework where the specification language, the verification condition
855generator, and the theorem provers were not designed with separation logic in
856mind. CerCo's plugin can exploit the separation predicates to automatically
857reason on the cost of execution of simple heap manipulation programs such as an
858in-place list reversal.
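
For concreteness, an in-place list reversal instrumented in the style of the
CerCo compiler might look as follows (a hand-written sketch with an invented
per-iteration cost; the separation predicates themselves are generated by Jessie
and are not shown):
\begin{lstlisting}
struct cell { int hd; struct cell *tl; };

void __cost_incr(int incr);   /* defined by the CerCo instrumentation */

/* Reverse the list in place: the loop body is a single basic block,
   so it is charged one (invented) constant per iteration. */
struct cell *rev(struct cell *l) {
  struct cell *r = 0;
  struct cell *next;
  __cost_incr(28);
  while (l) {
    __cost_incr(64);
    next = l->tl;
    l->tl = r;
    r = l;
    l = next;
  }
  __cost_incr(15);
  return r;
}
\end{lstlisting}
A synthesised bound for \texttt{rev} would then be linear in the length of the
list; establishing it requires knowing that the list is acyclic, which is where
the automatically generated separation predicates help.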
859
860\subsection{The CerCo Compiler}
In CerCo we have developed a number of cost-preserving compilers based
862on the labeling approach. Excluding an initial certified compiler for a While
863language, all remaining compilers target realistic source languages---a pure
864higher order functional language and a large subset of C with pointers, gotos
865and all data structures---and real world target processors---MIPS and the
866Intel 8051/8052 processor family. Moreover, they achieve a level of optimization
867that ranges from moderate (comparable to gcc level 1) to intermediate (including
loop peeling and unrolling, hoisting and late constant propagation). The
so-called Trusted CerCo Compiler is the only one that was implemented in the
interactive theorem prover Matita~\cite{matita} and whose cost predictions have
been certified. The distributed code is obtained by extracting OCaml code from
the Matita implementation. In the rest of this section we will focus only on the
Trusted CerCo Compiler, which targets only the C language and the 8051/8052
family and does not yet implement the advanced optimizations. Its user
interface, however, is the same as that of the other versions, so that one can
trade safety for additional performance. In particular, the Frama-C CerCo
plugin works with all the compilers without recompilation.
878
879The 8051/8052 microprocessor is a very simple one, with constant-cost
880instructions. It was chosen to separate the issue of exact propagation of the
881cost model from the orthogonal problem of the static analysis of object code
882that may require approximations or dependent costs.
883
The (trusted) CerCo compiler implements the following optimizations: cast
simplification, constant propagation in expressions, liveness-analysis-driven
spilling of registers, dead code elimination, branch displacement, and tunneling.
The last two optimizations are performed by our optimizing assembler
\cite{correctness}. The back-end of the compiler works on three-address
instructions, preferred to static single assignment code for the simplicity of
890the formal certification.
891
892The CerCo compiler is loosely based on the CompCert compiler \cite{compcert}, a
893recently developed certified compiler from C to the PowerPC, ARM and x86
microprocessors. Unlike CompCert, both the CerCo code and its
895certification are open source. Some data structures and language definitions for
896the front-end are directly taken from CompCert, while the back-end is a redesign
897of a compiler from Pascal to MIPS used by François Pottier for a course at the
\'Ecole Polytechnique.
899
900The main peculiarities of the CerCo compiler are:
901\begin{enumerate}
902\item all of our intermediate languages include label emitting instructions to
903implement the labeling approach, and the compiler preserves execution traces.
904\item traditionally compilers target an assembly language with additional
905macro-instructions to be expanded before assembly; for CerCo we need to go all
906the way down to object code in order to perform the static analysis. Therefore
we also integrated an optimizing assembler and a static analyser.
908\item to avoid implementing a linker and a loader, we do not support separate
compilation and external calls. Adding a linker and a loader is transparent
to the labeling approach and should pose no unexpected problems.
\item we target an 8-bit processor. Doing so requires
several changes and increases code size, but it is not fundamentally more
913complex. The proof of correctness, however, becomes much harder.
\item we target a microprocessor that has a non-uniform memory model, which is
still often the case for microprocessors used in embedded systems and is
becoming common again in multi-core processors. Therefore the compiler has to
keep track of where data is placed and must move data between memory regions in
the proper way. Moreover, the size of pointers to different regions is not
uniform. An additional difficulty was that the space available for the stack in
internal memory on the 8051 is tiny, allowing only a small number of nested
calls. To support full recursion, so that the CerCo tools can also be tested on
recursive programs, the compiler implements the stack in external memory.
923\end{enumerate}
924
925\subsection{A formal certification of the CerCo compiler}
926We implemented the
927CerCo compiler in the interactive theorem prover Matita and have formally
928certified that the cost model induced on the source code correctly and precisely
929predicts the object code behaviour. We actually induce two cost models, one for
930time and one for stack space used. We show the correctness of the prediction
only for those programs that do not exhaust the available machine stack space, a
property that, thanks to the stack cost model, we can statically analyse on the
source code using our Frama-C tool. We take the preservation of functional
properties as an assumption, not itself formally proved in CerCo. Other projects
have already certified the preservation of functional semantics in similar
compilers, and we have not attempted to directly repeat that work. In order to
complete the proof for non-functional properties, we have introduced a new
semantics for programming languages based on a new kind of structured observables,
with the associated notions of forward similarity and a study of the intensional
consequences of forward similarity. We have also introduced a unified
941representation for back-end intermediate languages that was exploited to provide
942a uniform proof of forward similarity.
943
The details of the proof techniques employed and the proof sketch can be found
in the CerCo deliverables and papers.
In this section we will only hint at the correctness statement, which turned
out to be more complex than we expected at the outset.
949
950\paragraph{The statement.}
Real time programs are often reactive programs that loop forever, responding to
events (inputs) by performing some computation followed by some action (output)
and a return to the initial state. For looping programs the overall execution
time does not make sense. The same happens for reactive programs that spend an
unpredictable amount of time in I/O. What is interesting is the reaction time,
which measures the time spent between I/O events. Moreover, we are interested in
predicting and ruling out programs that crash by running out of space on some
input.
959Therefore we need to look for a statement that talks about sub-runs of a
960program. The most natural statement is that the time predicted on the source
961code and spent on the object code by two corresponding sub-runs are the same.
962The problem to solve to make this statement formal is how to identify the
963corresponding sub-runs and how to single out those that are meaningful.
964The solution we found is based on the notion of measurability. We say that a run
965has a \emph{measurable sub-run} when both the prefix of the sub-run and the
966sub-run do not exhaust the stack space, the number of function calls and returns
967in the sub-run is the same, the sub-run does not perform any I/O and the sub-run
968starts with a label emission statement and ends with a return or another label
969emission statements. The stack usage is estimated using the stack usage model
970that is computed by the compiler.

The statement that we formally proved is that for each C run with a measurable
sub-run there exists an object code run with a sub-run such that the
observables of the two pairs of prefixes and sub-runs are the same, and the
time spent by the object code in the sub-run equals the time predicted on the
source code using the time cost model generated by the compiler.
We briefly discuss the constraints for measurability. Not exhausting the stack
space is clearly required for a run to be meaningful, because source programs
never crash for lack of space while object code programs can. The balancing of
function calls and returns is a requirement for precision: the labeling
approach allows the scope of a label emission statement to extend past function
calls in order to minimize the number of labels. A label therefore pays for all
the instructions in a block, excluding those executed in nested function calls.
If the number of calls and returns is unbalanced, there is a call we have not
returned to, which could be followed by additional instructions whose cost has
already been taken into account. To make the statement true (but less precise)
in this situation, we could only say that the cost predicted on the source code
is a safe bound, not that it is exact. The last condition, on the entry and
exit points of a sub-run, identifies sub-runs whose code corresponds to a
sequence of blocks that we can measure precisely. Any other choice would start
or end the run in the middle of a block, and we would again be forced to weaken
the statement, taking as a bound the cost obtained by also counting the
instructions that precede the starting point, or follow the final point, within
the block.
I/O operations may be performed in the prefix of the run, but not in the
measurable sub-run. Hence we prove that we can predict reaction times, but not
I/O times, as it should be.
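Schematically, and in a notation of our own that simplifies the actual Matita
statement, the theorem can be pictured as follows, where the $s_i$ are source
states, the $t_i$ object code states, $o_1$ and $o_2$ the observables of the
prefix and of the measurable sub-run, and $\kappa(\ell)$ the cost the compiler
assigns to the label $\ell$:
\[
\begin{array}{l}
\forall\, s_0 \xrightarrow{o_1}{}^{*} s_1 \xrightarrow{o_2}{}^{*} s_2
\ \text{with the sub-run } s_1 \to^{*} s_2 \text{ measurable},\\[0.5ex]
\exists\, t_0 \xrightarrow{o_1}{}^{*} t_1 \xrightarrow{o_2}{}^{*} t_2
\ \text{such that}\quad
\mathit{clock}(t_2) - \mathit{clock}(t_1) \;=\; \sum_{\ell \in o_2} \kappa(\ell).
\end{array}
\]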

\section{Future work}

All the CerCo software and deliverables can be found online at
\url{http://cerco.cs.unibo.it}.

The results obtained so far are encouraging and provide evidence that it is
possible to perform static time and space analysis at the source level without
losing accuracy, while reducing the trusted code base and reconciling the study
of functional and non-functional properties of programs. The techniques
introduced appear to be scalable, cover both imperative and functional
languages, and are compatible with every compiler optimization we have
considered so far.

To prove that compilers can keep track of optimizations and induce a precise
cost model on the source code, we targeted a simple architecture that admits a
cost model independent of the execution history. The most important future work
is dealing with hardware architectures characterized by history-dependent
stateful components, like caches and pipelines. The main issue is assigning a
parametric, dependent cost to basic blocks that can later be transferred by the
labeling approach to the source code and represented in a meaningful way to the
user. The dependent labeling approach that we have studied seems a promising
tool to achieve this goal, but the cost model generated for a realistic
processor could be too large and complex to be exposed in the source code.
Further study is required to evaluate the technique on a realistic processor
and to introduce early approximations of the cost model to make the technique
feasible.
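As a purely illustrative example of the intended outcome, with dependent
(indexed) labels the instrumentation of a loop could charge a cost that is a
function of the iteration index, for instance a first-iteration cache-miss
penalty followed by cheaper hits; the fragment reuses the illustrative
\texttt{\_\_cost\_incr} of the earlier sketch and the constants are invented.
\begin{lstlisting}
/* Sketch of a dependent cost annotation (invented constants):
   the first iteration pays a cache miss, later ones pay a hit. */
for (i = 0; i < n; i++) {
  __cost_incr(i == 0 ? 120 : 35);
  s += a[i];
}
\end{lstlisting}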

Further future work includes improving the cost invariant generation algorithms
and the coverage of compiler optimizations, combining the labeling approach
with a type-and-effect discipline \cite{typeffects} to handle languages with
implicit memory management, and experimenting with our tools in early
development phases. A larger case study is also needed to evaluate the CerCo
prototype on programs of industrial size.

\bibliographystyle{llncs}
\begin{thebibliography}{19}

\bibitem{survey} \textbf{A Survey of Static Program Analysis Techniques}.
W.~W\"ogerer. Technical report, Technische Universit\"at Wien, 2005.

\bibitem{cerco} \textbf{Certified Complexity}. R.M. Amadio, A. Asperti,
N. Ayache, B. Campbell, D. Mulligan, R. Pollack, Y. Régis-Gianas,
C. Sacerdoti Coen, I. Stark. In Procedia Computer Science, Volume 7,
Proceedings of the 2nd European Future Technologies Conference and Exhibition
(FET 11), 175--177, 2011.

\bibitem{labeling} \textbf{Certifying and Reasoning on Cost Annotations in C
Programs}. N. Ayache, R.M. Amadio, Y. Régis-Gianas. In Proc. FMICS, Springer
LNCS 7437: 32--46, 2012.
%DOI:10.1007/978-3-642-32469-7\_3

\bibitem{labeling2} \textbf{Certifying and Reasoning on Cost Annotations of
Functional Programs}. R.M. Amadio, Y. Régis-Gianas. In Proceedings of the
Second International Conference on Foundational and Practical Aspects of
Resource Analysis (FOPARA 2011), Springer LNCS 7177: 72--89, 2012.

\bibitem{compcert} \textbf{Formal verification of a realistic compiler}.
X. Leroy. In Commun. ACM 52(7): 107--115, 2009.

\bibitem{framac} \textbf{Frama-C user manual}. L. Correnson, P. Cuoq,
F. Kirchner, V. Prevosto, A. Puccetti, J. Signoles, B. Yakobowski. CEA-LIST,
Software Safety Laboratory, Saclay, F-91191. \url{http://frama-c.com/}.

\bibitem{paolo} \textbf{Indexed Labels for Loop Iteration Dependent Costs}.
P. Tranquilli. In Proceedings of the 11th International Workshop on
Quantitative Aspects of Programming Languages and Systems (QAPL 2013), Rome,
23rd--24th March 2013, Electronic Proceedings in Theoretical Computer Science,
to appear in 2013.

\bibitem{separation} \textbf{Intuitionistic reasoning about shared mutable data
structure}. J.C. Reynolds. In Millennial Perspectives in Computer Science,
pages 303--321, Houndsmill, Hampshire, Palgrave, 2000.

\bibitem{lustre} \textbf{LUSTRE: a declarative language for real-time
programming}. P. Caspi, D. Pilaud, N. Halbwachs, J.A. Plaice. In Proceedings
of the 14th ACM SIGACT-SIGPLAN Symposium on Principles of Programming
Languages, ACM, 1987.

\bibitem{correctness} \textbf{On the correctness of an optimising assembler for
the Intel MCS-51 microprocessor}. D.P. Mulligan, C. Sacerdoti Coen. In
Proceedings of the Second International Conference on Certified Programs and
Proofs, Springer-Verlag, 2012.

\bibitem{proartis} \textbf{PROARTIS: Probabilistically Analysable Real-Time
Systems}. F.J. Cazorla, E. Quiñones, T. Vardanega, L. Cucu, B. Triquet,
G. Bernat, E. Berger, J. Abella, F. Wartel, M. Houston, et al. In ACM
Transactions on Embedded Computing Systems, 2012.

\bibitem{embounded} \textbf{The EmBounded project (project paper)}. K. Hammond,
R. Dyckhoff, C. Ferdinand, R. Heckmann, M. Hofmann, H. Loidl, G. Michaelson,
J. Serot, A. Wallace. In Trends in Functional Programming, Volume 6, Intellect
Press, 2006.

\bibitem{matita} \textbf{The Matita Interactive Theorem Prover}. A. Asperti,
C. Sacerdoti Coen, W. Ricciotti, E. Tassi. In 23rd International Conference on
Automated Deduction (CADE 2011), 2011.

\bibitem{typeffects} \textbf{The Type and Effect Discipline}. J.-P. Talpin,
P. Jouvelot. In Proceedings of the Seventh Annual Symposium on Logic in
Computer Science (LICS '92), Santa Cruz, California, USA, June 22--25, 1992.
IEEE Computer Society, 1992.

\bibitem{stateart} \textbf{The worst-case execution-time problem: overview of
methods and survey of tools}. R. Wilhelm et al. In ACM Transactions on
Embedded Computing Systems, 7:1--53, May 2008.

%\bibitem{proartis2} \textbf{A Cache Design for Probabilistic Real-Time
% Systems}. L. Kosmidis, J. Abella, E. Quinones, F. Cazorla. In Design,
% Automation, and Test in Europe (DATE), Grenoble, France, 03/2013.

\end{thebibliography}

%\bibliography{fopara13.bib}

\end{document}