1\documentclass{llncs}
2
3\usepackage{amsfonts}
4\usepackage{amsmath}
5\usepackage{amssymb} 
6\usepackage[english]{babel}
7\usepackage{color}
8\usepackage{fancybox}
9\usepackage{fancyvrb}
10\usepackage{graphicx}
11\usepackage[colorlinks,
12            bookmarks,bookmarksopen,bookmarksdepth=2]{hyperref}
13\usepackage{hyphenat}
14\usepackage[utf8x]{inputenc}
15\usepackage{listings}
16\usepackage{mdwlist}
17\usepackage{microtype}
18\usepackage{stmaryrd}
19\usepackage{url}
20
21\usepackage{tikz}
22\usetikzlibrary{positioning,calc,patterns,chains,shapes.geometric,scopes}
23
24%\renewcommand{\verb}{\lstinline}
25%\def\lstlanguagefiles{lst-grafite.tex}
26%\lstset{language=Grafite}
27\lstset{language=C}
28
29\newlength{\mylength}
30\newenvironment{frametxt}%
31        {\setlength{\fboxsep}{5pt}
32                \setlength{\mylength}{\linewidth}%
33                \addtolength{\mylength}{-2\fboxsep}%
34                \addtolength{\mylength}{-2\fboxrule}%
35                \Sbox
36                \minipage{\mylength}%
37                        \setlength{\abovedisplayskip}{0pt}%
38                        \setlength{\belowdisplayskip}{0pt}%
39                }%
40                {\endminipage\endSbox
41                        \[\fbox{\TheSbox}\]}
42
43\title{Certified Complexity (CerCo)\thanks{The project CerCo acknowledges the
44financial support of the Future and Emerging Technologies (FET) programme within
45the Seventh Framework Programme for Research of the European Commission, under
46FET-Open grant number: 243881}}
47\author{
48%Roberto
49R. M. Amadio$^{3,4}$ \and
50%Nicolas
51N. Ayache$^4$ \and
52%François
53F. Bobot$^4$ \and
54%Jacob
55J. P. Boender$^1$ \and
56%Brian
57B. Campbell$^2$ \and
58%Ilias
59I. Garnier$^2$ \and
60%Antoine
61A. Madet$^4$ \and
62%James
63J. McKinna$^2$ \and
64%Dominic
65D. P. Mulligan$^1$ \and
66%Mauro
67M. Piccolo$^1$ \and
68%Yann
69Y. R\'egis-Gianas$^4$ \and
70%Claudio
71C. Sacerdoti Coen$^1$ \and
72%Ian
73I. Stark$^2$ \and
74%Paolo
75P. Tranquilli$^1$}
76\institute{Dipartimento di Informatica - Scienza e Ingegneria, Universit\`a di
77Bologna \and
78LFCS, University of Edinburgh
79\and INRIA (Team $\pi r^2$)
80\and
81Universit\'e Paris Diderot
82}
83
84\bibliographystyle{splncs03}
85
86\begin{document}
87
88\maketitle
89
90\begin{abstract}
91This paper provides an overview of the FET-Open Project CerCo
92(Certified Complexity). The main achievement of the project has been
93the introduction of a technique to perform static analysis of non-functional
94properties of programs (time, space) at the source level, without losing
95accuracy and with a small trusted code base. The main software component
96developed is a formally certified complexity-certifying compiler. The compiler
97translates the source code into object code and an instrumented copy of the
98source code. The latter exposes at the source level, and tracks precisely,
99the actual (non-asymptotic) computational cost.
100Untrusted invariant generators and trusted theorem provers
101are then used to compute and certify the parametric execution time of the
102code.
103\end{abstract}
104
105% ---------------------------------------------------------------------------- %
106% SECTION                                                                      %
107% ---------------------------------------------------------------------------- %
108\section{Introduction}
109
110\paragraph{Problem statement.} Computer programs can be specified with both
111functional constraints (what a program must do) and non-functional constraints
112(e.g. what resources, such as time and space, the program may use). In the current
113state of the art, functional properties are verified for high-level source code
114by combining user annotations (e.g. preconditions and invariants) with a
115multitude of automated analyses (invariant generators, type systems, abstract
116interpretation, theorem proving, etc.). By contrast, non-functional properties
117are generally checked on low-level object code, but also demand information
118about high-level functional behavior that must somehow be recreated.
119
120This situation presents several problems: 1) it can be hard to infer this
121high-level structure in the presence of compiler optimizations; 2) techniques
122that work on object code are not useful in early development, yet problems
123detected late are more expensive to tackle; 3) parametric cost analysis is very
124hard: how can we translate a cost that depends on the execution state (e.g. the
125value of a register or a carry bit) into a cost that the user can understand
126by looking at the source code?; 4) functional analysis performed only on object
127code makes any contribution from the programmer hard, leading to less precision
128in the estimates.
129
130\paragraph{Vision and approach.} We want to reconcile functional and
131non-functional analysis: to share information and perform both at the same time
132on source code.
133%
134What has previously prevented this approach is the lack of a uniform and precise
135cost model for high-level code: 1) each statement occurrence is compiled
136differently and optimizations may change control flow; 2) the cost of an object
137code instruction may depend on the runtime state of hardware components like
138pipelines and caches, which is not visible in the source code.
139
140To solve the issue, we envision a new generation of compilers able to keep track
141of program structure through compilation and optimisation, and to exploit that
142information to define a cost model for source code that is precise, non-uniform,
143and accounts for runtime state. With such a source-level cost model we can
144reduce non-functional verification to the functional case and exploit the state
145of the art in automated high-level verification \cite{survey}. The techniques
146previously used by Worst Case Execution Time (WCET) analysers on the object code
147are still available, but can now be coupled with additional source-level
148analysis. Where the approach produces precise cost models that are too complex to reason
149about, safe approximations can be used to trade precision for tractability.
150Finally, source code analysis can be performed during early development stages, when
151components have been specified but not implemented: source code modularity means
152that it is enough to specify the non-functional behavior of unimplemented
153components.
154
155\paragraph{Contributions.} We have developed the labeling approach \cite{labeling}, a
156technique to implement compilers that induce cost models on source programs by
157very lightweight tracking of code changes through compilation. We have studied
158how to formally prove the correctness of compilers implementing this technique.
159We have implemented such a compiler from C to object binaries for the 8051
160microcontroller, and verified it in an interactive theorem prover. We have
161implemented a Frama-C plugin \cite{framac} that invokes the compiler on a source
162program and uses this to generate invariants on the high-level source that
163correctly model low-level costs. Finally, the plugin certifies that the program
164respects these costs by calling automated theorem provers, an innovative
165technique in the field of cost analysis. As a case study, we show how the
166plugin can automatically compute and certify the exact reaction time of Lustre
167\cite{lustre} dataflow programs compiled into C.
168
169\section{Project context and objectives}
170Formal methods for verification of functional properties of programs have
171reached a level of maturity and automation that is allowing a slow but
172increasing adoption in production environments. For safety-critical code, it is
173becoming usual to combine rigorous software engineering methodologies and testing
174with static analysis in order to benefit from the strong points of every
175approach and mitigate the weaknesses. Of particular interest are open frameworks
176for the combination of different formal methods, where the programs can be
177progressively specified and are continuously enriched with new safety
178guarantees: every method contributes knowledge (e.g. new invariants) that
179becomes an assumption for later analysis.
180
181The scenario for the verification of non-functional properties (time spent,
182memory used, energy consumed) is bleaker and the future seems to be getting even
183worse. Most industries verify that real-time systems meet their deadlines
184simply by measuring the time spent in many runs of the systems, computing the
185maximum time and adding an empirical safety margin, claiming the result to be a
186bound on the WCET of the program. Formal methods and software to statically
187analyse the WCET of programs exist, but they often produce bounds that are too
188pessimistic to be useful. Recent advancements in hardware architectures are all
189focused on improving average-case performance, not the
190predictability of the worst case. Execution time is getting more and more
191dependent on the execution history, which determines the internal state of
192hardware components like pipelines and caches. Multi-core processors and
193non-uniform memory models are drastically reducing the possibility of performing
194static analysis in isolation, because programs are less and less
195time-composable. Clock-precise hardware models are necessary for static analysis,
196and obtaining them is becoming harder as a consequence of increased hardware
197complexity.
198
199Despite all the above, the need for reliable real-time systems and programs is
200increasing, and there is rising pressure from the research community towards
201the differentiation of hardware. The aim is the introduction of alternative
202hardware with a more predictable behaviour, more amenable to static
203analysis. An example is the decoupling of execution time from execution history
204by introducing randomization \cite{proartis}.
205
206In the CerCo project we do not try to address this problem, optimistically
207assuming that static analysis of non-functional properties of programs will
208again become feasible in the long term. The main objective of our work is
209instead to bring together static analysis of functional and non-functional
210properties, which, according to the current state of the art, are completely
211independent activities with limited exchange of information: while the
212functional properties are verified on the source code, the analysis of
213non-functional properties is entirely performed on the object code to exploit
214clock-precise hardware models.
215
216There are two main reasons to currently perform the analysis on the object code.
217The first one is the obvious lack of a uniform, precise cost model for source
218code instructions (or even basic blocks). During compilation, high-level
219instructions are torn apart and reassembled in context-specific ways so that
220identifying a fragment of object code with a single high-level instruction is
221infeasible. Even the control flow of the object and source code can be very
222different as a result of optimizations, for example due to aggressive loop
223optimisations. Despite the lack of a uniform, compilation- and
224program-independent cost model on the source language, the literature on the
225analysis of non-asymptotic execution time of high-level languages that assumes
226such a model is growing and gaining momentum. Its practical usefulness is doomed
227to be minimal, unless we can provide a replacement for such cost models. Some
228hope has been provided by the EmBounded project \cite{embounded}, which
229compositionally compiles high-level code to a byte code that is executed by an
230emulator with guarantees on the maximal execution time spent for each byte code
231instruction. The approach indeed provides a uniform model, at the price of
232losing precision of the model (each cost is a pessimistic upper bound) and
233performance of the executed code (because the byte code is emulated
234compositionally instead of performing a fully non-compositional compilation).
235%
236The second reason to perform the analysis on the object code is that bounding
237the worst-case execution time of small code fragments in isolation (e.g. loop
238bodies) and then adding up the bounds yields very poor estimates because no
239knowledge of the hardware state can be assumed when executing the fragment. By
240analysing longer runs the bound obtained becomes more precise because the lack
241of knowledge of the initial state has less effect on longer computations.
242
243In CerCo we propose a radically new approach to the problem: we reject the idea
244of a uniform cost model and we propose that the compiler, which knows how the
245code is translated, must return the cost model for basic blocks of high-level
246instructions. It must do so by keeping track of control flow modifications, so
247that they can be reversed, and by interfacing with static analysers.
248
249By embracing compilation, instead of avoiding it like EmBounded did, a CerCo
250compiler can at the same time produce efficient code and return costs that are
251as precise as the static analysis can be. Moreover, we allow our costs to be
252parametric: the cost of a block can depend on actual program data or on a
253summary of the execution history or on an approximated representation of the
254hardware state. For example, loop optimizations may assign to a loop body a cost
255that is a function of the number of iterations performed. As another example,
256the cost of a block may be a function of the vector of stalled pipeline states,
257which can be exposed in the source code and updated at each basic block exit. It
258is parametricity that allows us to analyse small code fragments without losing
259precision: in the analysis of the code fragment we do not have to ignore the
260initial hardware state. On the contrary, we can assume to know exactly which
261state (or mode, as the WCET literature calls it) we are in.
262
263The cost of an execution is the sum of the costs of basic blocks multiplied by
264the number of times they are executed, which is a functional property of the
265program. Therefore, in order to perform (parametric) time analysis of programs,
266it is necessary to combine a cost model with control and data flow analysis.
267Current state-of-the-art WCET technology
268\cite{stateart} performs the analysis on the object code, where the logic of the
269program is harder to reconstruct and most information available on the source
270code has been lost. Imprecision in the analysis leads to useless bounds. To
271improve precision, the tools ask the user to provide constraints on the object
272code control flow, usually in the form of bounds on the number of iterations of
273loops or linear inequalities on them. This requires the user to manually link
274the source and object code, translating their assumptions on the source code
275(which may be wrong) into object code constraints. The task is error-prone and
276hard in the presence of complex optimizations.
277
278The CerCo approach has the potential to dramatically improve the state of the
279art. By performing control and data flow analysis on the source code, the
280error-prone translation of invariants is completely avoided. It is in fact performed
281by the compiler itself when it induces the cost model on the source language.
282Moreover, any available technique for the verification of functional properties
283can be immediately reused, and multiple techniques can collaborate to
284infer and certify cost invariants for the program. Parametric cost analysis
285becomes the default, with non-parametric bounds used as a last resort when
286trading the precision of the analysis for simplicity. A priori, no
287technique previously used in traditional WCET is lost: each can simply be applied
288to the source code.
289
290Traditional techniques for WCET that work on object code are also affected by
291another problem: they cannot be applied before the generation of the object
292code. Functional properties can be analysed in early development stages, while
293analysis of non-functional properties may come too late to avoid expensive
294changes to the program architecture. Our approach already works in early
295development stages by axiomatically attaching costs to unimplemented components.
296
297All software used to verify properties of programs must be as bug-free as
298possible. The trusted code base for verification consists of the code that needs
299to be trusted in order to believe that the property holds. The trusted code base of
300state-of-the-art WCET tools is very large: one needs to trust the control flow
301analyser, the linear programming libraries it uses, and the formal models
302of the hardware. In CerCo we are moving the control flow analysis to the source
303code and we are introducing a non-standard compiler too. To reduce the trusted
304code base, we implemented a prototype compiler and a static analyser in an interactive
305theorem prover, which was used to certify that the cost computed on the source
306code is indeed the one actually spent by the hardware. Formal models of the
307hardware and of the high-level source languages were also implemented in the
308interactive theorem prover. Control flow analysis on the source code has been
309obtained using invariant generators, tools to produce proof obligations from
310generated invariants, and automatic theorem provers to verify the obligations. If
311the automatic provers are able to generate proof traces that can be
312independently checked, the only remaining component that enters the trusted code
313base is an off-the-shelf invariant generator which, in turn, can be proved
314correct using an interactive theorem prover. Therefore we achieve the double
315objective of allowing the use of more off-the-shelf components (e.g. provers and
316invariant generators) while at the same time reducing the trusted code base.
317
318%\paragraph{Summary of the CerCo objectives.} To summarize, the goal of CerCo is
319% to reconcile functional with non-functional analysis by performing them together
320% on the source code, sharing common knowledge about execution invariants. We want
321% to achieve the goal implementing a new generation of compilers that induce a
322% parametric, precise cost model for basic blocks on the source code. The compiler
323% should be certified using an interactive theorem prover to minimize the trusted
324% code base of the analysis. Once the cost model is induced, off-the-shelf tools
325% and techniques can be combined together to infer and prove parametric cost
326% bounds.
327%The long term benefits of the CerCo vision are expected to be:
328%1. the possibility to perform static analysis during early development stages
329%2.  parametric bounds made easier
330%3.  the application of off-the-shelf techniques currently unused for the
331% analysis of non-functional properties, like automated proving and type systems
332%4. simpler and safer interaction with the user, that is still asked for
333% knowledge, but on the source code, with the additional possibility of actually
334% verifying the provided knowledge
335%5. a reduced trusted code base
336%6. the increased accuracy of the bounds themselves.
337%
338%The long term success of the project is hindered by the increased complexity of
339% the static prediction of the non-functional behavior of modern hardware. In the
340% time frame of the European contribution we have focused on the general
341% methodology and on the difficulties related to the development and certification
342% of a cost model inducing compiler.
343
344\section{The typical CerCo workflow}
345\begin{figure}[!t]
346\begin{tabular}{l@{\hspace{0.2cm}}|@{\hspace{0.2cm}}l}
347\begin{lstlisting}
348char a[] = {3, 2, 7, -4};
350char threshold = 4;
350
351int main() {
352  char j;
353  char *p = a;
354  int found = 0;
355  for (j=0; j < 4; j++) {
356    if (*p <= threshold)
357      { found++; }
358    p++;
359  }
360  return found;
361}
362\end{lstlisting}
363&
364%  $\vcenter{\includegraphics[width=7.5cm]{interaction_diagram.pdf}}$
365\begin{tikzpicture}[
366    baseline={([yshift=-.5ex]current bounding box)},
367    element/.style={draw, text width=1.6cm, on chain, text badly centered},
368    >=stealth
369    ]
370{[start chain=going below, node distance=.5cm]
371\node [element] (cerco) {CerCo\\compiler};
372\node [element] (cost)  {CerCo\\cost plugin};
373{[densely dashed]
374\node [element] (ded)   {Deductive\\platform};
375\node [element] (check) {Proof\\checker};
376}
377}
378\coordinate [left=3.5cm of cerco] (left);
379{[every node/.style={above, text width=3.5cm, text badly centered,
380                     font=\scriptsize}]
381\draw [<-] ([yshift=-1ex]cerco.north west) coordinate (t) --
382    node {C source}
383    (t-|left);
384\draw [->] (cerco) -- (cost);
385\draw [->] ([yshift=1ex]cerco.south west) coordinate (t) --
386    node {C source+cost annotations}
387    (t-|left) coordinate (cerco out);
388\draw [->] ([yshift=1ex]cost.south west) coordinate (t) --
389    node {C source+cost annotations\\+synthesized assertions}
390    (t-|left) coordinate (out);
391{[densely dashed]
392\draw [<-] ([yshift=-1ex]ded.north west) coordinate (t) --
393    node {C source+cost annotations\\+complexity assertions}
394    (t-|left) coordinate (ded in) .. controls +(-.5, 0) and +(-.5, 0) .. (out);
395\draw [->] ([yshift=1ex]ded.south west) coordinate (t) --
396    node {complexity obligations}
397    (t-|left) coordinate (out);
398\draw [<-] ([yshift=-1ex]check.north west) coordinate (t) --
399    node {complexity proof}
400    (t-|left) .. controls +(-.5, 0) and +(-.5, 0) .. (out);
401\draw [dash phase=2.5pt] (cerco out) .. controls +(-1, 0) and +(-1, 0) ..
402    (ded in);
403}}
404%% user:
405% head
406\draw (current bounding box.west) ++(-.2,.5)
407    circle (.2) ++(0,-.2) -- ++(0,-.1) coordinate (t);
408% arms
409\draw (t) -- +(-.3,-.2);
410\draw (t) -- +(.3,-.2);
411% body
412\draw (t) -- ++(0,-.4) coordinate (t);
413% legs
414\draw (t) -- +(-.2,-.6);
415\draw (t) -- +(.2,-.6);
416\end{tikzpicture}
417\end{tabular}
418\caption{On the left: code to count the array's elements
419 less than or equal to the threshold. On the right: CerCo's interaction
420 diagram. CerCo's components are drawn solid.}
421\label{test1}
422\end{figure}
423We illustrate the workflow we envision (on the right of \autoref{test1})
424on an example program (on the left of \autoref{test1}).
425The user writes the program and feeds it to the CerCo compiler, which outputs
426an instrumented version of the same program that updates global variables
427that record the elapsed execution time and the stack space usage.
428The red lines in \autoref{itest1} are the instrumentation introduced by the
429compiler. The annotated program can then be enriched with complexity assertions
430in the style of Hoare logic, which are passed to a deductive platform (in our
431case Frama-C). We provide, as a Frama-C cost plugin, a simple automatic
432synthesiser for complexity assertions (the blue lines in \autoref{itest1}),
433which can be overridden by the user to increase or decrease accuracy. From the
434assertions, a general purpose deductive platform produces proof obligations
435which in turn can be closed by automatic or interactive provers, ending in a
436proof certificate. Nine proof obligations are generated
437from~\autoref{itest1} (to prove that the loop invariant holds after
438one execution if it holds before, to prove that the whole program execution
439takes at most 1228 cycles, etc.). The CVC3 prover closes all obligations
440in a few seconds on standard hardware.
441%
442\fvset{commandchars=\\\{\}}
443\lstset{morecomment=[s][\color{blue}]{/*@}{*/},
444        moredelim=[is][\color{blue}]{$}{$},
445        moredelim=[is][\color{red}]{|}{|}}
446\begin{figure}[!p]
447\begin{lstlisting}
448|int __cost = 33; int __stack = 5, __stack_max = 5;|
449
450|void __cost_incr(int incr) { __cost = __cost + incr; }|
451
452|void __stack_incr(int incr) {
453  __stack = __stack + incr;
454  __stack_max = __stack_max < __stack ? __stack : __stack_max;
455}|
456
457char a[4] = { 3, 2, 7, 252, };
458char threshold = 4;
459
460/*@ behavior stack_cost:
461      ensures __stack_max <=
462              __max(\old(__stack_max), \old(__stack));
463      ensures __stack == \old(__stack);
464     
465    behavior time_cost:
466      ensures __cost <= \old(__cost)+1228; */
467int main(void)
468{
469  char j;
470  char *p;
471  int found;
472  |__stack_incr(0); __cost_incr(91);|
473  p = a;
474  found = 0;
475  $__l: /* internal */$
476  /*@ for time_cost: loop invariant
477        __cost <=
478        \at(__cost,__l)+220*(__max(\at(5-j,__l), 0)-__max(5-j, 0));
479      for stack_cost: loop invariant
480        __stack_max == \at(__stack_max,__l);
481      for stack_cost: loop invariant
482        __stack == \at(__stack,__l);
483      loop variant 4-j; */
484  for (j = 0; j < 4; j++) {
485    |__cost_incr(76);|
486    if (*p <= threshold) {
487      |__cost_incr(144);|
488      found++;
489    } else {
490      |__cost_incr(122);|
491    }
492    p++;
493  }
494  |__cost_incr(37); __stack_incr(-0);|
495  return found;
496}
497\end{lstlisting}
498\caption{The instrumented version of the program in \autoref{test1},
499 with instrumentation added by the CerCo compiler in red and cost invariants
500 added by the CerCo Frama-C plugin in blue. The \texttt{\_\_cost},
501 \texttt{\_\_stack} and \texttt{\_\_stack\_max} variables hold the elapsed time
502in clock cycles and the current and max stack usage. Their initial values
503hold the clock cycles spent in initializing the global data before calling
504\texttt{main} and the space required by global data (and thus unavailable for
505the stack).
506}
507\label{itest1}
508\end{figure}
509\section{Main S\&T results of the CerCo project}
510We will now review the main S\&T results achieved in the CerCo project.
511
512% \emph{Dependent labeling~\cite{?}.} The basic labeling approach assumes a
513% bijective mapping between object code and source code O(1) blocks (called basic
514% blocks). This assumption is violated by many program optimizations (e.g. loop
515% peeling and loop unrolling). It also assumes the cost model computed on the
516% object code to be non parametric: every block must be assigned a cost that does
517% not depend on the state. This assumption is violated by stateful hardware like
518% pipelines, caches, branch prediction units. The dependent labeling approach is
519% an extension of the basic labeling approach that allows to handle parametric
520% cost models. We showed how the method allows to deal with loop optimizations and
521% pipelines, and we speculated about its applications to caches.
522%
523% \emph{Techniques to exploit the induced cost model.} Every technique used for
524% the analysis of functional properties of programs can be adapted to analyse the
525% non-functional properties of the source code instrumented by compilers that
526% implement the labeling approach. In order to gain confidence in this claim, we
527% showed how to implement a cost invariant generator combining abstract
528% interpretation with separation logic ideas \cite{separation}. We integrated
529% everything in the Frama-C modular architecture, in order to compute proof
530% obligations from functional and cost invariants and to use automatic theorem
531% provers on them. This is an example of a new technique that is not currently
532% exploited in WCET analysis. It also shows how precise functional invariants
533% benefits the non-functional analysis too. Finally, we show how to fully
534% automatically analyse the reaction time of Lustre nodes that are first compiled
535% to C using a standard Lustre compiler and then processed by a C compiler that
536% implements the labeling approach.
537
538% \emph{The CerCo compiler.} This is a compiler from a large subset of the C
539% program to 8051/8052 object code,
540% integrating the labeling approach and a static analyser for 8051 executables.
541% The latter can be implemented easily and does not require dependent costs
542% because the 8051 microprocessor is a very simple one, with constant-cost
543% instruction. It was chosen to separate the issue of exact propagation of the
544% cost model from the orthogonal problem of the static analysis of object code
545% that may require approximations or dependent costs. The compiler comes in
546% several versions: some are prototypes implemented directly in OCaml, and they
547% implement both the basic and dependent labeling approaches; the final version
548% is extracted from a Matita certification and at the moment implements only the
549% basic approach.
550
551\subsection{The (basic) labeling approach}
552The labeling approach is the main technique that
553underlies all the developments in CerCo. It allows us to track the evolution of
554basic blocks during compilation in order to propagate the cost model from the
555object code to the source code without losing precision in the process.
556\paragraph{Problem statement.} Given a source program $P$, we want to obtain an
557instrumented source program $P'$, written in the same programming language, and
558the object code $O$ such that: 1) $P'$ is obtained by inserting into $P$ some
559additional instructions to update global cost information like the amount of
560time spent during execution or the maximal stack space required; 2) $P$ and $P'$
561must have the same functional behavior, i.e.\ they must produce the same output
562and intermediate observables; 3) $P$ and $O$ must have the same functional
563behavior; 4) after execution and at interesting points during execution, the
564cost information computed by $P'$ must be an upper bound of the one spent by $O$
565to perform the corresponding operations (soundness property); 5) the difference
566between the costs computed by $P'$ and the execution costs of $O$ must be
567bounded by a program-dependent constant (precision property).
568
569\paragraph{The labeling software components.} We solve the problem in four
570stages \cite{labeling}, implemented by four software components that are used
571in sequence.
572
573The first component labels the source program $P$ by injecting label emission
574statements in appropriate positions to mark the beginning of basic blocks.
575% The
576% set of labels with their positions is called labeling.
577The syntax and semantics
578of the source programming language are augmented with label emission statements.
579The statement ``EMIT $\ell$'' behaves like a NOP instruction that does not affect the
580program state or control flow, but its execution is observable.
581% Therefore the observables of a run of a program becomes a stream
582% of label emissions: $\ell_1,\ldots,\ell_n$, called the program trace. We clarify the conditions
583% that the labeling must respect later.
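For instance, the first component would label the program of \autoref{test1}
along the following lines (a sketch: \texttt{\_\_label\_emit} and the label
names are hypothetical concrete renderings of the abstract ``EMIT $\ell$''
statement):
\begin{lstlisting}
int main() {
  char j;
  char *p = a;
  int found = 0;
  __label_emit(L_entry);     /* beginning of the entry block */
  for (j = 0; j < 4; j++) {
    __label_emit(L_body);    /* also breaks the loop, as the
                                soundness conditions below require */
    if (*p <= threshold)
      { __label_emit(L_then); found++; }
    else
      { __label_emit(L_else); }
    p++;
  }
  __label_emit(L_exit);      /* block following the loop */
  return found;
}
\end{lstlisting}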
584
585The second component is a labeling-preserving compiler. It can be obtained from
586an existing compiler by adding label emission statements to every intermediate
587language and by propagating label emission statements during compilation. The
588compiler is correct if it preserves both the functional behavior of the program
589and the traces of observables.
590% We may also ask that the function that erases the cost
591% emission statements commute with compilation. This optional property grants that
592% the labeling does not interfere with the original compiler behavior. A further
593% set of requirements will be added later.
594
595The third component is a static object code analyser. It takes as input labeled
596object code and computes the scope of each of its label emission statements,
597i.e.\ the tree of instructions that may be executed after the statement and before
598a new label emission is encountered.
599It is a tree and not a sequence because the scope
600may contain a branching statement. In order to guarantee that such a finite tree
601exists, the object code must not contain any loop that is not broken by a label
602emission statement. This is the first requirement of a sound labeling. The
603analyser fails if the labeling is unsound. For each scope, the analyser
604computes an upper bound of the execution time required by the scope, using the
605maximum of the costs of the two branches in case of a conditional statement.
606Finally, the analyser computes the cost of a label by taking the maximum of the
607costs of the scopes of every statement that emits that label.
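The computation of the upper bound can be sketched as the following recursion
over the instruction tree of a scope (hypothetical C types and names; the
actual analyser works on 8051 object code):
\begin{lstlisting}
#include <stddef.h>

/* A node of a scope: one instruction, followed either by the rest
   of the block or by the two successors of a conditional. */
typedef struct insn {
  int cost;               /* execution time of this instruction */
  struct insn *next;      /* fall-through successor             */
  struct insn *branch;    /* taken successor of a conditional   */
} insn;

/* Upper bound on the execution time of a scope: each instruction
   contributes its cost, a conditional contributes the maximum of
   its branches; a label emission ends the scope (NULL). */
int scope_cost(insn *i) {
  if (i == NULL) return 0;
  int rest = scope_cost(i->next);
  if (i->branch != NULL) {
    int alt = scope_cost(i->branch);
    if (alt > rest) rest = alt;
  }
  return i->cost + rest;
}
\end{lstlisting}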
608
609The fourth and last component takes as input the cost model computed at step 3
610and the labelled code computed at step 1. It outputs a source program obtained
611by replacing each label emission statement with a statement that increments the
612global cost variable by the cost that the cost model associates with the label.
613The obtained source code is the instrumented source code.
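Schematically, with the constant cost model of \autoref{itest1} this component
performs the following rewriting (the label name is hypothetical; the constant
is the one computed for the loop body in the figure):
\begin{lstlisting}
__label_emit(L_body);  /* labelled source (step 1) */
__cost_incr(76);       /* instrumented source (step 4):
                          76 is the cost assigned to L_body */
\end{lstlisting}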
614
615\paragraph{Correctness.} Requirements 1, 2 and 3 of the problem statement
616obviously hold, with 2 and 3 being a consequence of the definition of a correct
617labeling-preserving compiler. It is also obvious that the value of the global
618cost variable of an instrumented source code is at any time equal to the sum of
619the costs of the labels emitted by the corresponding labelled code. Moreover,
620because the compiler preserves all traces, the sum of the costs of the labels
621emitted in the source and target labelled code are the same. Therefore, to
622satisfy the fourth requirement, we need to guarantee that the time taken to execute
623the object code is equal to the sum of the costs of the labels emitted by the
624object code. We collect all the necessary conditions for this to happen in the
625definition of a sound labeling: a) all loops must be broken by a cost emission
626statement;  b) all program instructions must be in the scope of some cost
627emission statement. To satisfy also the fifth requirement, additional
628requirements must be imposed on the object code labeling to avoid all uses of
629the maximum in the cost computation: the labeling is precise if every label is
630emitted at most once and both branches of a conditional jump start with a label
631emission statement.
632
633The correctness and precision of the labeling approach only rely on the
634correctness and precision of the object code labeling. The simplest
635way to achieve them is to impose correctness and precision
636requirements also on the initial labeling produced by the first software
637component, and to ask the compiler to preserve these
638properties too. The latter requirement imposes serious limitations on the
639compilation strategy and optimizations: the compiler may not duplicate any code
640that contains label emission statements, like loop bodies. Therefore several
641loop optimizations like peeling or unrolling are prevented. Moreover, precision
642of the object code labeling is not sufficient per se to obtain global
643precision: we also implicitly assumed the static analysis to be able to
644associate a precise constant cost to every instruction. This is not possible in the
645presence of stateful hardware whose state influences the cost of operations,
646like pipelines and caches. In the next subsection we will see an extension of the
647basic labeling approach to cover this situation.
648
649The labeling approach described in this section can be applied equally well and
650with minor modifications to imperative and functional languages
651\cite{labeling2}. We have tested it on a simple imperative language without
652functions (a While language), on a subset of C and on two compilation chains for
653a purely functional higher-order language. The two main changes required to deal
654with functional languages are: 1) because global variables and updates are not
655available, the instrumentation phase produces monadic code to “update” the
656global costs; 2) the requirements for a sound and precise labeling of the
657source code must be changed when the compilation is based on CPS translations.
658In particular, we need to introduce both labels emitted before a statement is
659executed and labels emitted after a statement is executed. The latter capture
660code that is inserted by the CPS translation and that would escape all label
661scopes.
662
663Phases 1, 2 and 3 can be applied as well to logic languages (e.g. Prolog).
664However, the instrumentation phase cannot: in standard Prolog there is no notion
665of (global) variable whose state is not retracted during backtracking.
666Therefore, the cost of executing computations that are later backtracked would
667not be counted correctly. Any extension of logic languages with
668non-backtrackable state should support the labeling approach.
669
670\subsection{Dependent labeling}
671The core idea of the basic labeling approach is to establish a tight connection
672between basic blocks executed in the source and target languages. Once the
673connection is established, any cost model computed on the object code can be
674transferred to the source code, without affecting the code of the compiler or
675its proof. In particular, it is immediate that we can also transport cost models
676that associate with each label a function from hardware state to natural numbers.
677However, a problem arises during the instrumentation phase that replaces cost
678emission statements with increments of global cost variables. The global cost
679variable must be incremented with the result of applying the function associated
680with the label to the hardware state at the time of execution of the block.
681The hardware state comprises both the functional state that affects the
682computation (the value of the registers and memory) and the non-functional
683state that does not (the pipeline and cache contents for example). The former is
684in correspondence with the source code state, but reconstructing the
685correspondence may be hard and lifting the cost model to work on the source code
686state is likely to produce cost expressions that are too hard to reason on.
687Luckily enough, in all modern architectures the cost of executing single
688instructions is either independent of the functional state or the jitter---the
689difference between the worst and best case execution times---is small enough
690to be bounded without losing too much precision. Therefore we can concentrate
691on dependencies over the “non-functional” parts of the state only.
692
693The non-functional state has no correspondence in the high-level state and does
694not influence the functional properties. What can be done is to expose the
695non-functional state in the source code. We present here only the basic
696intuition in a simplified form: the technical details that allow us to handle the
697general case are more complex and can be found in the CerCo deliverables. We add
698to the source code an additional global variable that represents the
699non-functional state and another one that remembers the last labels emitted. The
700state variable must be updated at every label emission statement, using an
701update function which is also computed during static analysis. The update
702function associates with each label a function from the recently emitted labels
703and the old state to the new state. It is computed by composing the semantics of every
704instruction in a basic block and restricting it to the non-functional part of
705the state.
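A minimal sketch of the resulting instrumentation, assuming (hypothetically)
that the non-functional state fits in one machine word and that the update
function depends only on the last emitted label:
\begin{lstlisting}
#define N_LABELS 16        /* hypothetical sizes */
#define N_STATES  8

int __cost  = 0;
int __state = 0;           /* abstraction of the pipeline contents */

/* Tables produced by the static analysis (hypothetical names):
   for each label, the block cost and the next non-functional
   state, both as functions of the state entering the block. */
extern const int __cost_of[N_LABELS][N_STATES];
extern const int __next_state[N_LABELS][N_STATES];

/* Every label emission statement becomes: */
void __emit(int label) {
  __cost  += __cost_of[label][__state];    /* state-dependent cost */
  __state  = __next_state[label][__state]; /* analyser's update    */
}
\end{lstlisting}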
706
707Not all the details of the non-functional state need to be exposed, and the
708technique works better when the part of state that is required can be summarized
709in a simple data structure. For example, to handle simple but realistic
710pipelines it is sufficient to remember a short integer that encodes the position
711of bubbles (stuck instructions) in the pipeline. In any case, the user does not
712need to understand the meaning of the state to reason over the properties of the
713program. Moreover, at any moment the user or the invariant generator tools that
714analyse the instrumented source code produced by the compiler can decide to
715trade precision of the analysis for simplicity by approximating the parametric
716cost with safe non-parametric bounds. Interestingly, the functional analysis of
717the code can determine which blocks are executed more frequently in order to
718approximate more aggressively the ones that are executed less.
719
720Dependent labeling can also be applied to allow the compiler to duplicate
721blocks that contain labels (e.g. in loop optimizations) \cite{paolo}. The effect is to assign
722a different cost to the different occurrences of a duplicated label. For
723example, loop peeling turns a loop into the concatenation of a copy of the loop
724body (that executes the first iteration) with the conditional execution of the
725loop (for the successive iterations). Because of further optimizations, the two
726copies of the loop will be compiled differently, with the first body usually
727taking more time.
728
729By introducing a variable that keeps track of the iteration number, we can
730associate with the label a cost that is a function of the iteration number. The
731same technique works for loop unrolling without modifications: the function will
732assign one cost to the even iterations and another cost to the odd ones. The
733actual work to be done consists in introducing into the source code, for each
734loop, a variable that counts the number of iterations. The loop optimization code
735that duplicates the loop bodies must also be modified to correctly propagate
736the updates of the iteration counters. The technical details are more complex and
737can be found in the CerCo reports and publications. The implementation, however,
738is quite simple and the changes to a loop optimizing compiler are minimal.
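For instance, after a two-fold unrolling of the loop of \autoref{test1} the
instrumented source could take the following shape (a sketch with hypothetical
costs: the even and odd iterations of the source loop are compiled differently,
so the cost of the loop label becomes a function of the iteration parity):
\begin{lstlisting}
void __cost_incr(int incr);  /* as in the basic instrumentation */

int count(char *p, char threshold) {
  int j, found = 0;
  int __niter = 0;           /* iteration counter introduced by
                                the compiler for this loop */
  for (j = 0; j < 4; j++) {
    /* dependent cost of the loop label: hypothetical constants
       for the even and odd iterations after unrolling */
    __cost_incr((__niter % 2 == 0) ? 80 : 64);
    __niter++;
    if (*p <= threshold) { found++; }
    p++;
  }
  return found;
}
\end{lstlisting}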
739
740\subsection{Techniques to exploit the induced cost model}
741We review the cost synthesis techniques developed in the project.
742The starting hypothesis is that we have a certified methodology to annotate
743blocks in the source code with constants which provide a sound and possibly
744precise upper bound on the cost of executing the blocks after compilation to
745object code.
746
747The principle that we have followed in designing the cost synthesis tools is
748that the synthetic bounds should be expressed and proved within a general
749purpose tool built to reason on the source code. In particular, we rely on the
750Frama-C tool to reason on C code and on the Coq proof-assistant to reason on
751higher-order functional programs.
752
753This principle entails that: 1)
754The inferred synthetic bounds are indeed correct as long as the general purpose
755tool is. 2) There is no limitation on the class of programs that can be handled,
756as long as the user is willing to carry out an interactive proof.
757
758Of course, automation is desirable whenever possible. Within this framework,
759automation means writing programs that give hints to the general purpose tool.
760These hints may take the form, say, of loop invariants/variants, of predicates
761describing the structure of the heap, or of types in a light logic. If these
762hints are correct and sufficiently precise, the general purpose tool will produce
763a proof automatically; otherwise, user interaction is required.
764
765\paragraph{The Cost plugin and its application to the Lustre compiler.}
766Frama-C \cite{framac} is a set of analysers for C programs with a
767specification language called ACSL. New analyses can be dynamically added
768through a plugin system. For instance, the Jessie plugin allows deductive
769verification of C programs with respect to their specification in ACSL, with
770various provers as back-end tools.
771We developed the CerCo Cost plugin for the Frama-C platform as a proof of
772concept of an automatic environment exploiting the cost annotations produced by
773the CerCo compiler. It consists of an OCaml program which, to a first approximation,
774takes the following actions: 1) it receives as input a C program, 2) it
775applies the CerCo compiler to produce a related C program with cost annotations,
7763) it applies some heuristics to produce a tentative bound on the cost of
777executing the C functions of the program as a function of the value of their
778parameters, 4) the user can then call the Jessie plugin to discharge the
779related proof obligations.
780In the following we elaborate on the soundness of the framework and the
781experiments we performed with the Cost tool on the C programs produced by a
782Lustre compiler.
783
784\paragraph{Soundness.} The soundness of the whole framework depends on the cost
785annotations added by the CerCo compiler, the synthetic costs produced by the
786Cost plugin, the verification conditions (VCs) generated by Jessie, and the
787external provers discharging the VCs. The synthetic costs being in ACSL format,
788Jessie can be used to verify them. Thus, even if the added synthetic costs are
789incorrect (relative to the cost annotations), the process as a whole is still
790correct: indeed, Jessie will not validate incorrect costs and no conclusion can
791be made about the WCET of the program in this case. In other words, the
792soundness does not really depend on the action of the Cost plugin, which can in
793principle produce any synthetic cost. However, in order to be able to actually
794prove a WCET of a C function, we need to add correct annotations in such a way that
795Jessie and subsequent automatic provers have enough information to deduce their
796validity. In practice this is not straightforward even for very simple programs
797composed of branching and assignments (no loops and no recursion) because a fine
798analysis of the VCs associated with branching may lead to a complexity blow-up.
799\paragraph{Experience with Lustre.} Lustre is a data-flow language for programming
800synchronous systems and the language comes with a compiler to C. We designed a
801wrapper for supporting Lustre files.
802The C function produced by the compiler implements the step function of the
803synchronous system, and computing the WCET of the function amounts to obtaining a
804bound on the reaction time of the system. We tested the Cost plugin and the
805Lustre wrapper on the C programs generated by the Lustre compiler. For programs
806consisting of a few hundred lines of code, the Cost plugin computes a WCET and
807Alt-Ergo is able to discharge all VCs automatically.
808
809\paragraph{Handling C programs with simple loops.}
810The cost annotations added by the CerCo compiler take the form of C instructions
811that update a fresh global variable, called the cost variable, by a constant.
812Synthesizing a WCET of a C function thus consists in statically resolving an
813upper bound on the difference between the value of the cost variable before and
814after the execution of the function, i.e. finding in the function the instructions
815that update the cost variable and establishing the number of times they are passed
816through during the flow of execution. In order to perform the analysis the plugin
817makes the following assumptions on the programs:
818\begin{enumerate}
819\item No recursive functions.
820\item Every loop must be annotated with a variant. The variants of `for' loops
are automatically inferred.
\end{enumerate}
821
822The plugin proceeds as follows.
823First the call graph of the program is computed.
824Then the computation of the cost of a function is performed by traversing its
825control flow graph. If the function $f$ calls the function $g$, then the function $g$
826is processed before the function $f$. The cost at a node is the maximum of the
827costs of the successors.
828In the case of a loop with a body that has a constant cost for every step of the
829loop, the cost is the product of the cost of the body and the variant taken
830at the start of the loop.
831In the case of a loop with a body whose cost depends on the values of some free
832variables, a fresh logic function $f$ is introduced to represent the cost of the
833loop in the logic assertions. This logic function takes the variant as its first
834parameter. The other parameters of $f$ are the free variables of the body of the
835loop. An axiom is added to account for the fact that the cost is accumulated at each
836step of the loop.
837The cost of the function is directly added as a post-condition of the function.
839The user can influence the annotation by different means:
840by using more precise variants or
841by annotating function with cost specification. The plugin will use this cost
842for the function instead of computing it.
843\paragraph{C programs with pointers.}
844When it comes to verifying programs involving pointer-based data structures,
845such as linked lists, trees, or graphs, the use of traditional first-order logic
846to specify, and of SMT solvers to verify, shows some limitations. Separation
847logic \cite{separation} is then an elegant alternative. It is a program logic
848with a new notion of conjunction to express spatial heap separation. Bobot
849recently introduced automatically generated separation predicates into the Jessie plugin
850to simulate separation logic reasoning in a traditional verification
851framework where the specification language, the verification condition
852generator, and the theorem provers were not designed with separation logic in
853mind. CerCo's plugin can exploit the separation predicates to automatically
854reason on the cost of execution of simple heap manipulation programs such as an
855in-place list reversal.
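As an illustration, in-place list reversal is instrumented like any other code
(the cost constants below are hypothetical); the separation predicates are what
allow the provers to establish that the loop body is paid once per list cell,
yielding a bound linear in the length of the list:
\begin{lstlisting}
#include <stddef.h>

void __cost_incr(int incr);     /* as in the instrumented code above */

struct cell { int val; struct cell *next; };

struct cell *rev(struct cell *l) {
  struct cell *r = NULL;
  __cost_incr(30);              /* hypothetical block costs */
  while (l != NULL) {
    __cost_incr(50);            /* paid once per list cell */
    struct cell *tmp = l->next; /* reverse one link in place */
    l->next = r;
    r = l;
    l = tmp;
  }
  __cost_incr(10);
  return r;
}
\end{lstlisting}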
856
857\subsection{The CerCo Compiler}
858In CerCo we have developed a number of cost-preserving compilers based
859on the labeling approach. Excluding an initial certified compiler for a While
860language, all remaining compilers target realistic source languages---a pure
861higher order functional language and a large subset of C with pointers, gotos
862and all data structures---and real world target processors---MIPS and the
863Intel 8051/8052 processor family. Moreover, they achieve a level of optimization
864that ranges from moderate (comparable to gcc level 1) to intermediate (including
865loop peeling and unrolling, hoisting and late constant propagation). The
866so-called Trusted CerCo Compiler is the only one that was implemented in the
867interactive theorem prover Matita~\cite{matita} and whose cost model was certified. The code distributed
868is obtained by extracting OCaml code from the Matita implementation. In the rest of
869this section we will focus only on the Trusted CerCo Compiler, which targets only
870the C language and the 8051/8052 family, and which does not implement the
871advanced optimizations yet. Its user interface, however, is the same as that
872of the other versions, so that one can trade safety for additional performance. In
873particular, the Frama-C CerCo plugin works without recompilation with all
874compilers.
875
876The 8051/8052 microprocessor is a very simple one, with constant-cost
877instructions. It was chosen to separate the issue of exact propagation of the
878cost model from the orthogonal problem of the static analysis of object code
879that may require approximations or dependent costs.
880
881The (trusted) CerCo compiler implements the following optimizations: cast
882simplification, constant propagation in expressions, liveness-analysis-driven
883spilling of registers, dead code elimination, branch displacement and tunneling.
884The last two optimizations are performed by our optimizing assembler
885\cite{correctness}. The back-end of the compiler works on three-address
886instructions, which we preferred to static single assignment form for the simplicity of
887the formal certification.
888
889The CerCo compiler is loosely based on the CompCert compiler \cite{compcert}, a
890recently developed certified compiler from C to the PowerPC, ARM and x86
891microprocessors. Contrary to CompCert, both the CerCo code and its
892certification are open source. Some data structures and language definitions for
893the front-end are directly taken from CompCert, while the back-end is a redesign
894of a compiler from Pascal to MIPS used by Fran\c{c}ois Pottier for a course at the
895\'Ecole Polytechnique.
896
897The main peculiarities of the CerCo compiler are:
898\begin{enumerate}
899\item all of our intermediate languages include label emitting instructions to
900implement the labeling approach, and the compiler preserves execution traces.
901\item traditionally compilers target an assembly language with additional
902macro-instructions to be expanded before assembly; for CerCo we need to go all
903the way down to object code in order to perform the static analysis. Therefore
904we also integrated an optimizing assembler and a static analyser.
905\item to avoid implementing a linker and a loader, we do not support separate
906compilation and external calls. Adding a linker and a loader is transparent
907to the labeling approach and should present no unexpected problems.
908\item we target an 8-bit processor. Targeting an 8-bit processor requires
909several changes and increases code size, but it is not fundamentally more
910complex. The proof of correctness, however, becomes much harder.
911\item we target a microprocessor that has a non-uniform memory model, which is
912still often the case for microprocessors used in embedded systems and which is
913becoming common again in multi-core processors. Therefore the compiler has to
914keep track of data and it must move data between memory regions in the proper
915way. Also the size of pointers to different regions is not uniform. An
916additional difficulty was that the space available for the stack in the internal
917memory of the 8051 is tiny, allowing only a small number of nested calls. To
918support full recursion, in order to test the CerCo tools also on recursive
919programs, the compiler manually manages a stack in external memory.
920\end{enumerate}
921
922\subsection{A formal certification of the CerCo compiler}
923We implemented the
924CerCo compiler in the interactive theorem prover Matita and have formally
925certified that the cost model induced on the source code correctly and precisely
926predicts the object code behavior. We actually induce two cost models, one for
927time and one for stack space used. We show the correctness of the prediction
928only for those programs that do not exhaust the available machine stack space, a
929property that, thanks to the stack cost model, we can statically analyse on the
930source code using our Frama-C tool. We take the preservation of functional properties
931as an assumption, not itself formally proved in CerCo. Other projects have
932already certified the preservation of functional semantics in similar compilers,
933and we have not attempted to directly repeat that work. In order to complete the
934proof for non-functional properties, we have introduced a new semantics for
935programming languages based on a new kind of structured observables, with the
936related notions of forward similarity and the study of the intensional
937consequences of forward similarity. We have also introduced a unified
938representation for back-end intermediate languages that was exploited to provide
939a uniform proof of forward similarity.
940
941The details of the proof techniques employed and the proof sketch can be
942found in the CerCo deliverables and papers.
943In this section we will only hint at the correctness statement, which turned
944out to be more complex than we expected at the beginning.
946
947\paragraph{The statement.}
948Real-time programs are often reactive programs that loop forever responding to
949events (inputs) by performing some computation followed by some action (output)
950and a return to the initial state. For looping programs the overall execution
951time does not make sense. The same happens for reactive programs that spend an
952unpredictable amount of time in I/O. What is interesting is the reaction time,
953which measures the time spent between I/O events. Moreover, we are interested in
954predicting and ruling out programs that crash by running out of space on a certain
955input.
956Therefore we need to look for a statement that talks about sub-runs of a
957program. The most natural statement is that the time predicted on the source
958code and spent on the object code by two corresponding sub-runs are the same.
959The problem to solve to make this statement formal is how to identify the
960corresponding sub-runs and how to single out those that are meaningful.
961The solution we found is based on the notion of measurability. We say that a run
962has a \emph{measurable sub-run} when both the prefix of the sub-run and the
963sub-run do not exhaust the stack space, the number of function calls and returns
964in the sub-run is the same, the sub-run does not perform any I/O and the sub-run
965starts with a label emission statement and ends with a return or another label
966emission statements. The stack usage is estimated using the stack usage model
967that is computed by the compiler.
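
As an illustration, consider the following hypothetical instrumented source
(the counter name and the constants are invented for the example and are not
the compiler's actual output). Each increment of the counter plays the role
of a label emission, so the run from the first increment to the
\texttt{return} is measurable provided the stack is not exhausted:

\begin{lstlisting}
int __cost = 0;          /* global clock-cycle counter              */

int sum(int a[], int n) {
  int i, s = 0;
  __cost += 12;          /* block: prologue and loop initialization */
  for (i = 0; i < n; i++) {
    __cost += 7;         /* block: one loop iteration               */
    s = s + a[i];
  }
  __cost += 4;           /* block: loop exit and return             */
  return s;
}
\end{lstlisting}

On such a fragment an invariant generator can synthesize the parametric bound
$16 + 7n$ cycles for the measurable sub-run.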

The statement that we formally proved is that for each C run with a
measurable sub-run there exists an object code run with a sub-run such that
the observables of the pairs of prefixes and sub-runs are the same and the
time spent by the object code in the sub-run is the same as the time
predicted on the source code using the time cost model generated by the
compiler.
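Schematically, and eliding many details of the actual Matita formalization
(in particular the explicit treatment of prefixes and of the two cost
models), the statement has the shape
\[
\forall r_S.\;\mathit{measurable}(r_S) \Rightarrow
\exists r_O.\;\mathit{obs}(r_O) = \mathit{obs}(r_S) \wedge
\mathit{time}(r_O) = \mathit{predicted}(r_S)
\]
where $r_S$ ranges over source runs with a measurable sub-run, $r_O$ over
object code runs, $\mathit{obs}$ collects the observables of prefix and
sub-run, $\mathit{time}$ is the concrete execution time of the object code
sub-run, and $\mathit{predicted}$ is the time computed on the source sub-run
from the compiler-generated cost model.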
We briefly discuss the constraints for measurability. Not exhausting the
stack space is a clear requirement for the meaningfulness of a run, because
source programs do not crash for lack of space while object code programs do.
The balancing of function calls and returns is a requirement for precision:
the labeling approach allows the scope of a label emission statement to
extend past function calls in order to minimize the number of labels.
Therefore a label pays for all the instructions in a block, excluding those
executed in nested function calls. If the number of calls/returns is
unbalanced, there is a call we have not returned from that could be followed
by additional instructions whose cost has already been taken into account. To
make the statement true (but less precise) in this situation, we could only
say that the cost predicted on the source code is a safe bound, not that it
is exact. The last condition, on the entry/exit points of a run, identifies
sub-runs whose code corresponds to a sequence of blocks that we can measure
precisely. Any other choice would start or end the run in the middle of a
block, and we would again be forced to weaken the statement, taking as a
bound the cost obtained by also counting the instructions that precede the
starting point, or follow the final one, in their blocks.
I/O operations can be performed in the prefix of the run, but not in the
measurable sub-run. Therefore we prove that we can predict reaction times,
but not I/O times, as it should be.

\section{Future work}

All the CerCo software and deliverables can be found on-line at
\url{http://cerco.cs.unibo.it}.

The results obtained so far are encouraging and provide evidence that it is
possible to perform static time and space analysis at the source level
without losing accuracy, reducing the trusted code base and reconciling the
study of functional and non-functional properties of programs. The techniques
introduced seem to be scalable, cover both imperative and functional
languages, and are compatible with every compiler optimization we have
considered so far.

To prove that compilers can keep track of optimizations and induce a precise
cost model on the source code, we targeted a simple architecture that admits
a cost model independent of the execution history. The most important future
work is dealing with hardware architectures characterized by history-dependent
stateful components, like caches and pipelines. The main issue consists in
assigning a parametric, dependent cost to basic blocks that can later be
transferred by the labeling approach to the source code and represented in a
meaningful way to the user. The dependent labeling approach that we have
studied seems a promising tool to achieve this goal, but the cost model
generated for a realistic processor could be too large and complex to be
exposed in the source code. Further study is required to evaluate the
technique on a realistic processor and to introduce early approximations of
the cost model to make the technique feasible.
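
As a hypothetical illustration, in the spirit of the indexed labels of
\cite{paolo}, a loop whose first iteration triggers a cache miss might carry
a dependent annotation like the following (the constants are invented):

\begin{lstlisting}
for (i = 0; i < n; i++) {
  /* dependent cost: the first iteration pays for a cache
     miss, the following iterations hit the cache          */
  __cost += (i == 0) ? 45 : 7;
  s = s + a[i];
}
\end{lstlisting}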

Examples of further future work include improving the cost invariant
generator algorithms and the coverage of compiler optimizations, combining
the labeling approach with the type and effect discipline \cite{typeffects}
to handle languages with implicit memory management, and experimenting with
our tools in early development phases. Larger use cases are also needed to
evaluate the CerCo prototype on industrial-size programs.

\begin{thebibliography}{19}

\bibitem{survey} \textbf{A Survey of Static Program Analysis Techniques}.
W. W\"ogerer. Technical report, Technische Universit\"at Wien, 2005.

\bibitem{cerco} \textbf{Certified Complexity}. R. Amadio, A. Asperti,
N. Ayache, B. Campbell, D. Mulligan, R. Pollack, Y. Régis-Gianas,
C. Sacerdoti Coen, I. Stark. In Procedia Computer Science, Volume 7, 2011,
Proceedings of the 2nd European Future Technologies Conference and Exhibition
2011 (FET 11), 175--177.

\bibitem{labeling} \textbf{Certifying and Reasoning on Cost Annotations in C
Programs}. N. Ayache, R. M. Amadio, Y. Régis-Gianas. In Proc. FMICS, Springer
LNCS 7437: 32--46, 2012, DOI:10.1007/978-3-642-32469-7\_3.

\bibitem{labeling2} \textbf{Certifying and Reasoning on Cost Annotations of
Functional Programs}. R. M. Amadio, Y. Régis-Gianas. In Proceedings of the
Second International Conference on Foundational and Practical Aspects of
Resource Analysis (FOPARA 2011), Springer LNCS, 2011.

\bibitem{compcert} \textbf{Formal Verification of a Realistic Compiler}.
X. Leroy. In Commun. ACM 52(7): 107--115, 2009.

\bibitem{framac} \textbf{Frama-C User Manual}. L. Correnson, P. Cuoq,
F. Kirchner, V. Prevosto, A. Puccetti, J. Signoles, B. Yakobowski. CEA-LIST,
Software Safety Laboratory, Saclay, F-91191, \url{http://frama-c.com/}.

\bibitem{paolo} \textbf{Indexed Labels for Loop Iteration Dependent Costs}.
P. Tranquilli. In Proceedings of the 11th International Workshop on
Quantitative Aspects of Programming Languages and Systems (QAPL 2013), Rome,
23rd--24th March 2013, Electronic Proceedings in Theoretical Computer
Science, to appear in 2013.

\bibitem{separation} \textbf{Intuitionistic Reasoning about Shared Mutable
Data Structure}. J. C. Reynolds. In Millennial Perspectives in Computer
Science, pages 303--321, Houndsmill, Hampshire, 2000. Palgrave.

\bibitem{lustre} \textbf{LUSTRE: A Declarative Language for Real-Time
Programming}. P. Caspi, D. Pilaud, N. Halbwachs, J. A. Plaice. In Proceedings
of the 14th ACM SIGACT-SIGPLAN Symposium on Principles of Programming
Languages, ACM, 1987.

\bibitem{correctness} \textbf{On the Correctness of an Optimising Assembler
for the Intel MCS-51 Microprocessor}. D. P. Mulligan, C. Sacerdoti Coen. In
Proceedings of the Second International Conference on Certified Programs and
Proofs, Springer-Verlag, 2012.

\bibitem{proartis} \textbf{PROARTIS: Probabilistically Analysable Real-Time
Systems}. F. J. Cazorla, E. Quiñones, T. Vardanega, L. Cucu, B. Triquet,
G. Bernat, E. Berger, J. Abella, F. Wartel, M. Houston, et al. In ACM
Transactions on Embedded Computing Systems, 2012.

\bibitem{embounded} \textbf{The EmBounded Project (Project Paper)}.
K. Hammond, R. Dyckhoff, C. Ferdinand, R. Heckmann, M. Hofmann, H. Loidl,
G. Michaelson, J. Serot, A. Wallace. In Trends in Functional Programming,
Volume 6, Intellect Press, 2006.

\bibitem{matita} \textbf{The Matita Interactive Theorem Prover}. A. Asperti,
C. Sacerdoti Coen, W. Ricciotti, E. Tassi. In 23rd International Conference
on Automated Deduction (CADE 2011).

\bibitem{typeffects} \textbf{The Type and Effect Discipline}. J.-P. Talpin,
P. Jouvelot. In Proceedings of the Seventh Annual Symposium on Logic in
Computer Science (LICS '92), Santa Cruz, California, USA, June 22--25, 1992.
IEEE Computer Society, 1992.

\bibitem{stateart} \textbf{The Worst-Case Execution-Time Problem: Overview of
Methods and Survey of Tools}. R. Wilhelm et al. In ACM Transactions on
Embedded Computing Systems, 7:1--53, May 2008.

%\bibitem{proartis2} \textbf{A Cache Design for Probabilistic Real-Time
% Systems}, L. Kosmidis, J. Abella, E. Quinones, and F. Cazorla, in Design,
% Automation, and Test in Europe (DATE), Grenoble, France, 03/2013.

\end{thebibliography}

%\bibliography{fopara13.bib}

\end{document}