\documentclass{llncs}

\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage[english]{babel}
\usepackage{color}
\usepackage{fancybox}
\usepackage{fancyvrb}
\usepackage{graphicx}
\usepackage[colorlinks]{hyperref}
\usepackage{hyphenat}
\usepackage[utf8x]{inputenc}
\usepackage{listings}
\usepackage{mdwlist}
\usepackage{microtype}
\usepackage{stmaryrd}
\usepackage{url}

\usepackage{tikz}
\usetikzlibrary{positioning,calc,patterns,chains,shapes.geometric,scopes}

%\renewcommand{\verb}{\lstinline}
%\def\lstlanguagefiles{lst-grafite.tex}
%\lstset{language=Grafite}
\lstset{language=C}

\newlength{\mylength}
\newenvironment{frametxt}%
        {\setlength{\fboxsep}{5pt}
                \setlength{\mylength}{\linewidth}%
                \addtolength{\mylength}{-2\fboxsep}%
                \addtolength{\mylength}{-2\fboxrule}%
                \Sbox
                \minipage{\mylength}%
                        \setlength{\abovedisplayskip}{0pt}%
                        \setlength{\belowdisplayskip}{0pt}%
                }%
                {\endminipage\endSbox
                        \[\fbox{\TheSbox}\]}
\title{Certified Complexity (CerCo)\thanks{The project CerCo acknowledges the
financial support of the Future and Emerging Technologies (FET) programme within
the Seventh Framework Programme for Research of the European Commission, under
FET-Open grant number: 243881}}
\author{
%Roberto
R. M. Amadio$^{3,4}$ \and
%Nicolas
N. Ayache$^4$ \and
%François
F. Bobot$^4$ \and
%Jacob
J. P. Boender$^1$ \and
%Brian
B. Campbell$^2$ \and
%Ilias
I. Garnier$^2$ \and
%Antoine
A. Madet$^4$ \and
%James
J. McKinna$^2$ \and
%Dominic
D. P. Mulligan$^1$ \and
%Mauro
M. Piccolo$^1$ \and
%Yann
Y. R\'egis-Gianas$^4$ \and
%Claudio
C. Sacerdoti Coen$^1$ \and
%Ian
I. Stark$^2$ \and
%Paolo
P. Tranquilli$^1$}
\institute{Dipartimento di Informatica - Scienza e Ingegneria, Universit\`a di
Bologna \and
LFCS, University of Edinburgh
\and INRIA (Team $\pi r^2$)
\and
Universit\'e Paris Diderot
}

\bibliographystyle{splncs03}

\begin{document}

\maketitle

\begin{abstract}
This paper provides an overview of the FET-Open Project CerCo
(Certified Complexity). The main achievement of the project has been
the introduction of a technique to perform static analysis of non-functional
properties of programs (time, space) at the source level, without losing
accuracy and with a small trusted code base. The main software component
developed is a formally certified complexity-certifying compiler. The compiler
translates the source code to the object code and to an instrumented copy of the
source code. The latter exposes at
the source level, and tracks precisely, the actual (non-asymptotic)
computational cost. Untrusted invariant generators and trusted theorem provers
are then used to compute and certify the parametric execution time of the
code.
\end{abstract}

% ---------------------------------------------------------------------------- %
% SECTION                                                                      %
% ---------------------------------------------------------------------------- %
\section{Introduction}

\paragraph{Problem statement.} Computer programs can be specified with both
functional constraints (what a program must do) and non-functional constraints
(e.g. what resources – time, space, etc. – the program may use). In the current
state of the art, functional properties are verified for high-level source code
by combining user annotations (e.g. preconditions and invariants) with a
multitude of automated analyses (invariant generators, type systems, abstract
interpretation, theorem proving, etc.). By contrast, non-functional properties
are generally checked on low-level object code, but also demand information
about high-level functional behavior that must somehow be recreated.

This situation presents several problems: 1) it can be hard to infer this
high-level structure in the presence of compiler optimizations; 2) techniques
that work on object code are not useful in early development, yet problems
detected later are more expensive to tackle; 3) parametric cost analysis is very
hard: how can we reflect a cost that depends on the execution state (e.g. the
value of a register or a carry bit) as a cost that the user can understand by
looking at the source code?; 4) functional analysis performed only on object
code makes any contribution from the programmer hard, leading to less precision
in the estimates.

\paragraph{Vision and approach.} We want to reconcile functional and
non-functional analysis: to share information and perform both at the same time
on source code.
%
What has previously prevented this approach is the lack of a uniform and precise
cost model for high-level code: 1) each occurrence of a statement is compiled
differently and optimizations may change the control flow; 2) the cost of an
object code instruction may depend on the runtime state of hardware components
like pipelines and caches, which is not visible in the source code.

To solve the issue, we envision a new generation of compilers able to keep track
of program structure through compilation and optimisation, and to exploit that
information to define a cost model for source code that is precise, non-uniform,
and accounts for runtime state. With such a source-level cost model we can
reduce non-functional verification to the functional case and exploit the state
of the art in automated high-level verification \cite{survey}. The techniques
previously used by Worst Case Execution Time (WCET) analysers on the object code
are still available, but can now be coupled with additional source-level
analysis. Where the approach produces precise cost models too complex to reason
about, safe approximations can be used to trade precision for tractability.
Finally, source code analysis can be performed during early development stages,
when components have been specified but not implemented: source code modularity
means that it is enough to specify the non-functional behavior of unimplemented
components.

\paragraph{Contributions.} We have developed the labelling approach \cite{?}, a
technique to implement compilers that induce cost models on source programs by
very lightweight tracking of code changes through compilation. We have studied
how to formally prove the correctness of compilers implementing this technique.
We have implemented such a compiler from C to object binaries for the 8051
microcontroller, and verified it in an interactive theorem prover. We have
implemented a Frama-C plugin \cite{framac} that invokes the compiler on a source
program and uses the result to generate invariants on the high-level source that
correctly model low-level costs. Finally, the plug-in certifies that the program
respects these costs by calling automated theorem provers, an innovative
technique in the field of cost analysis. As a case study, we show how the
plug-in can automatically compute and certify the exact reaction time of Lustre
\cite{lustre} dataflow programs compiled into C.

\section{Project context and objectives}
Formal methods for the verification of functional properties of programs have
reached a level of maturity and automation that is allowing a slow but
increasing adoption in production environments. For safety critical code, it is
becoming usual to combine rigorous software engineering methodologies and
testing with static analysis, in order to benefit from the strong points of each
approach and mitigate the weaknesses. Of particular interest are open frameworks
for the combination of different formal methods, where programs can be
progressively specified and continuously enriched with new safety
guarantees: every method contributes knowledge (e.g. new invariants) that
becomes an assumption for later analyses.

The scenario for the verification of non-functional properties (time spent,
memory used, energy consumed) is bleaker, and the future seems to be getting
even worse. Most industries verify that real time systems meet their deadlines
simply by measuring the time spent in many runs of the systems, computing the
maximum time and adding an empirical safety margin, claiming the result to be a
bound for the WCET of the program. Formal methods and software to statically
analyse the WCET of programs exist, but they often produce bounds that are too
pessimistic to be useful. Recent advances in hardware architecture are all
focused on the improvement of average case performance, not on the
predictability of the worst case. Execution time is becoming more and more
dependent on the execution history, which determines the internal state of
hardware components like pipelines and caches. Multi-core processors and
non-uniform memory models are drastically reducing the possibility of performing
static analysis in isolation, because programs are less and less time
composable. Clock-precise hardware models are necessary for static analysis, and
obtaining them is becoming harder as a consequence of the increased hardware
complexity.

Despite all the above, the need for reliable real time systems and programs is
increasing, and there is growing pressure from the research community towards
the differentiation of hardware. The aim is the introduction of alternative
hardware with more predictable behaviour that is better suited to static
analysis, an example being the decoupling of execution time from execution
history by introducing randomization \cite{proartis}.

In the CerCo project we do not try to address this problem, optimistically
assuming that static analysis of non-functional properties of programs will
again become feasible in the long term. The main objective of our work is
instead to bring together static analysis of functional and non-functional
properties, which, according to the current state of the art, are completely
independent activities with limited exchange of information: while functional
properties are verified on the source code, the analysis of non-functional
properties is entirely performed on the object code to exploit clock-precise
hardware models.

There are two main reasons to currently perform the analysis on the object code.
The first one is the obvious lack of a uniform, precise cost model for source
code instructions (or even basic blocks). During compilation, high level
instructions are torn apart and reassembled in context-specific ways so that
identifying a fragment of object code with a single high level instruction is
infeasible. Even the control flow of the object and source code can be very
different as a result of optimizations, for example due to aggressive loop
optimisations. Despite the lack of a uniform, compilation- and
program-independent cost model on the source language, the literature on the
analysis of non-asymptotic execution time of high level languages that assumes
such a model is growing and gaining momentum. Its practical usefulness is doomed
to be minimal, unless we can provide a replacement for such cost models. Some
hope has been provided by the EmBounded project \cite{embounded}, which
compositionally compiles high level code to a byte code that is executed by an
emulator with guarantees on the maximal execution time spent for each byte code
instruction. The approach indeed provides a uniform model, at the price of
losing precision of the model (each cost is a pessimistic upper bound) and
performance of the executed code (because the byte code is emulated
compositionally instead of performing a fully non-compositional compilation).
%
The second reason to perform the analysis on the object code is that bounding
the worst case execution time of small code fragments in isolation (e.g. loop
bodies) and then adding up the bounds yields very poor estimates, because no
knowledge of the hardware state can be assumed when executing the fragment. By
analysing longer runs the bound obtained becomes more precise, because the lack
of knowledge of the initial state has less effect on longer computations.

In CerCo we propose a radically new approach to the problem: we reject the idea
of a uniform cost model and we propose that the compiler, which knows how the
code is translated, must return the cost model for basic blocks of high level
instructions. It must do so by keeping track of the control flow modifications
in order to reverse them, and by interfacing with static analysers.

By embracing compilation, instead of avoiding it like EmBounded did, a CerCo
compiler can at the same time produce efficient code and return costs that are
as precise as the static analysis can be. Moreover, we allow our costs to be
parametric: the cost of a block can depend on actual program data, on a
summary of the execution history, or on an approximated representation of the
hardware state. For example, loop optimizations may assign to a loop body a cost
that is a function of the number of iterations performed. As another example,
the cost of a block may be a function of the vector of stalled pipeline states,
which can be exposed in the source code and updated at each basic block exit. It
is parametricity that allows the analysis of small code fragments without losing
precision: in the analysis of the code fragment we do not have to ignore the
initial hardware state. On the contrary, we can assume that we know exactly
which state (or mode, as the WCET literature calls it) we are in.

The cost of an execution is the sum of the costs of the basic blocks multiplied
by the number of times they are executed, which is a functional property of the
program. Therefore, in order to perform (parametric) time analysis of programs,
it is necessary to combine a cost model with control and data flow analysis.
Current state of the art WCET technology
\cite{stateart} performs the analysis on the object code, where the logic of the
program is harder to reconstruct and most information available on the source
code has been lost. Imprecision in the analysis leads to useless bounds. To
improve precision, the tools ask the user to provide constraints on the object
code control flow, usually in the form of bounds on the number of iterations of
loops or linear inequalities on them. This requires the user to manually link
the source and object code, translating their assumptions on the source code
(which may be wrong) into object code constraints. The task is error prone and
hard in the presence of complex optimizations.

The CerCo approach has the potential to dramatically improve the state of the
art. By performing control and data flow analysis on the source code, the error
prone translation of invariants is completely avoided. It is in fact performed
by the compiler itself when it induces the cost model on the source language.
Moreover, any available technique for the verification of functional properties
can be immediately reused, and multiple techniques can collaborate to
infer and certify cost invariants for the program. Parametric cost analysis
becomes the default, with non-parametric bounds used as a last resort when
trading the precision of the analysis for simplicity. A priori, no
technique previously used in traditional WCET is lost: all of them can simply
be applied to the source code.

Traditional techniques for WCET that work on object code are also affected by
another problem: they cannot be applied before the generation of the object
code. Functional properties can be analysed in early development stages, while
analysis of non-functional properties may come too late to avoid expensive
changes to the program architecture. Our approach already works in early
development stages by axiomatically attaching costs to unimplemented components.

All software used to verify properties of programs must be as bug free as
possible. The trusted code base for verification consists of the code that needs
to be trusted to believe that the property holds. The trusted code base of
state-of-the-art WCET tools is very large: one needs to trust the control flow
analyser and the linear programming libraries it uses, and also the formal
models of the hardware. In CerCo we are moving the control flow analysis to the
source code and we are introducing a non-standard compiler too. To reduce the
trusted code base, we implemented a prototype compiler and a static analyser in
an interactive theorem prover, which was used to certify that the cost computed
on the source code is indeed the one actually spent by the hardware. Formal
models of the hardware and of the high level source languages were also
implemented in the interactive theorem prover. Control flow analysis on the
source code has been obtained using invariant generators, tools to produce proof
obligations from generated invariants, and automatic theorem provers to verify
the obligations. If the automatic provers are able to generate proof traces that
can be independently checked, the only remaining component that enters the
trusted code base is an off-the-shelf invariant generator which, in turn, can be
proved correct using an interactive theorem prover. Therefore we achieve the
double objective of using more off-the-shelf components (e.g. provers and
invariant generators) while reducing the trusted code base at the same time.

%\paragraph{Summary of the CerCo objectives.} To summarize, the goal of CerCo is
% to reconcile functional with non-functional analysis by performing them together
% on the source code, sharing common knowledge about execution invariants. We want
% to achieve the goal implementing a new generation of compilers that induce a
% parametric, precise cost model for basic blocks on the source code. The compiler
% should be certified using an interactive theorem prover to minimize the trusted
% code base of the analysis. Once the cost model is induced, off-the-shelf tools
% and techniques can be combined together to infer and prove parametric cost
% bounds.
%The long term benefits of the CerCo vision are expected to be:
%1. the possibility to perform static analysis during early development stages
%2.  parametric bounds made easier
%3.  the application of off-the-shelf techniques currently unused for the
% analysis of non-functional properties, like automated proving and type systems
%4. simpler and safer interaction with the user, that is still asked for
% knowledge, but on the source code, with the additional possibility of actually
% verifying the provided knowledge
%5. a reduced trusted code base
%6. the increased accuracy of the bounds themselves.
%
%The long term success of the project is hindered by the increased complexity of
% the static prediction of the non-functional behavior of modern hardware. In the
% time frame of the European contribution we have focused on the general
% methodology and on the difficulties related to the development and certification
% of a cost model inducing compiler.

\section{The typical CerCo workflow}
\begin{figure}[!t]
\begin{tabular}{l@{\hspace{0.2cm}}|@{\hspace{0.2cm}}l}
\begin{lstlisting}
char a[] = {3, 2, 7, -4};
char threshold = 4;

int main() {
  char j;
  char *p = a;
  int found = 0;
  for (j=0; j < 4; j++) {
    if (*p <= threshold)
      { found++; }
    p++;
  }
  return found;
}
\end{lstlisting}
&
%  $\vcenter{\includegraphics[width=7.5cm]{interaction_diagram.pdf}}$
\begin{tikzpicture}[
    baseline={([yshift=-.5ex]current bounding box)},
    element/.style={draw, text width=1.6cm, on chain, text badly centered},
    >=stealth
    ]
{[start chain=going below, node distance=.5cm]
\node [element] (cerco) {CerCo\\compiler};
\node [element] (cost)  {CerCo\\cost plugin};
{[densely dashed]
\node [element] (ded)   {Deductive\\platform};
\node [element] (check) {Proof\\checker};
}
}
\coordinate [left=3.5cm of cerco] (left);
{[every node/.style={above, text width=3.5cm, text badly centered,
                     font=\scriptsize}]
\draw [<-] ([yshift=-1ex]cerco.north west) coordinate (t) --
    node {C source}
    (t-|left);
\draw [->] (cerco) -- (cost);
\draw [->] ([yshift=1ex]cerco.south west) coordinate (t) --
    node {C source+cost annotations}
    (t-|left) coordinate (cerco out);
\draw [->] ([yshift=1ex]cost.south west) coordinate (t) --
    node {C source+cost annotations\\+synthesized assertions}
    (t-|left) coordinate (out);
{[densely dashed]
\draw [<-] ([yshift=-1ex]ded.north west) coordinate (t) --
    node {C source+cost annotations\\+complexity assertions}
    (t-|left) coordinate (ded in) .. controls +(-.5, 0) and +(-.5, 0) .. (out);
\draw [->] ([yshift=1ex]ded.south west) coordinate (t) --
    node {complexity obligations}
    (t-|left) coordinate (out);
\draw [<-] ([yshift=-1ex]check.north west) coordinate (t) --
    node {complexity proof}
    (t-|left) .. controls +(-.5, 0) and +(-.5, 0) .. (out);
\draw [dash phase=2.5pt] (cerco out) .. controls +(-1, 0) and +(-1, 0) ..
    (ded in);
}}
%% user:
% head
\draw (current bounding box.west) ++(-.2,.5)
    circle (.2) ++(0,-.2) -- ++(0,-.1) coordinate (t);
% arms
\draw (t) -- +(-.3,-.2);
\draw (t) -- +(.3,-.2);
% body
\draw (t) -- ++(0,-.4) coordinate (t);
% legs
\draw (t) -- +(-.2,-.6);
\draw (t) -- +(.2,-.6);
\end{tikzpicture}
\end{tabular}
\caption{On the left: code to count the array's elements that are
 less than or equal to the threshold. On the right: CerCo's interaction
 diagram. CerCo's components are drawn solid.}
\label{test1}
\end{figure}
We illustrate the workflow we envision (on the right of \autoref{test1})
on an example program (on the left of \autoref{test1}).
The user writes the program and feeds it to the CerCo compiler, which outputs
an instrumented version of the same program that updates global variables
recording the elapsed execution time and the stack space usage.
The red lines in \autoref{itest1} are the instrumentation introduced by the
compiler. The instrumented program can then be enriched with complexity
assertions in the style of Hoare logic, which are passed to a deductive platform
(in our case Frama-C). As part of the Frama-C cost plug-in we provide a simple
automatic synthesiser for complexity assertions (the blue lines in
\autoref{itest1}), which can be overridden by the user to increase or decrease
accuracy. From the assertions, a general purpose deductive platform produces
proof obligations which in turn can be closed by automatic or interactive
provers, ending in a proof certificate. Nine proof obligations are generated
from~\autoref{itest1} (to prove that the loop invariant holds after
one execution if it holds before, to prove that the whole program execution
takes at most 1228 cycles, etc.). The CVC3 prover closes all obligations
in a few seconds on standard hardware.
%
\fvset{commandchars=\\\{\}}
\lstset{morecomment=[s][\color{blue}]{/*@}{*/},
        moredelim=[is][\color{blue}]{$}{$},
        moredelim=[is][\color{red}]{|}{|}}
\begin{figure}[!p]
\begin{lstlisting}
|int __cost = 33; int __stack = 5, __stack_max = 5;|

|void __cost_incr(int incr) { __cost = __cost + incr; }|

|void __stack_incr(int incr) {
  __stack = __stack + incr;
  __stack_max = __stack_max < __stack ? __stack : __stack_max;
}|

char a[4] = { 3, 2, 7, 252, };
char threshold = 4;

/*@ behavior stack_cost:
      ensures __stack_max <=
              __max(\old(__stack_max), \old(__stack));
      ensures __stack == \old(__stack);

    behavior time_cost:
      ensures __cost <= \old(__cost)+1228; */
int main(void)
{
  char j;
  char *p;
  int found;
  |__stack_incr(0); __cost_incr(91);|
  p = a;
  found = 0;
  $__l: /* internal */$
  /*@ for time_cost: loop invariant
        __cost <=
        \at(__cost,__l)+220*(__max(\at(5-j,__l), 0)-__max (5-j, 0));
      for stack_cost: loop invariant
        __stack_max == \at(__stack_max,__l);
      for stack_cost: loop invariant
        __stack == \at(__stack,__l);
      loop variant 4-j; */
  for (j = 0; j < 4; j++) {
    |__cost_incr(76);|
    if (*p <= threshold) {
      |__cost_incr(144);|
      found++;
    } else {
      |__cost_incr(122);|
    }
    p++;
  }
  |__cost_incr(37); __stack_incr(-0);|
  return found;
}
\end{lstlisting}
\caption{The instrumented version of the program in \autoref{test1},
 with instrumentation added by the CerCo compiler in red and cost invariants
 added by the CerCo Frama-C plugin in blue. The \texttt{\_\_cost},
 \texttt{\_\_stack} and \texttt{\_\_stack\_max} variables hold the elapsed time
in clock cycles and the current and maximum stack usage. Their initial values
hold the clock cycles spent in initializing the global data before calling
\texttt{main} and the space required by global data (and thus unavailable for
the stack).
}
\label{itest1}
\end{figure}
\section{Main S\&T results of the CerCo project}
% We will now review the main S\&T results achieved in the CerCo project.

\emph{The (basic) labelling approach~\cite{?}.} This is the main technique that
underlies all the developments in CerCo. It makes it possible to track the
evolution of basic blocks during compilation, in order to propagate the cost
model from the object code to the source code without losing precision in the
process.

\emph{Dependent labelling~\cite{?}.} The basic labelling approach assumes a
bijective mapping between object code and source code $O(1)$ blocks (called
basic blocks). This assumption is violated by many program optimizations (e.g.
loop peeling and loop unrolling). It also assumes the cost model computed on the
object code to be non-parametric: every block must be assigned a cost that does
not depend on the state. This assumption is violated by stateful hardware like
pipelines, caches and branch prediction units. The dependent labelling approach
is an extension of the basic labelling approach that handles parametric
cost models. We showed how the method deals with loop optimizations and
pipelines, and we speculated about its application to caches.

\emph{Techniques to exploit the induced cost model.} Every technique used for
the analysis of functional properties of programs can be adapted to analyse the
non-functional properties of the source code instrumented by compilers that
implement the labelling approach. In order to gain confidence in this claim, we
showed how to implement a cost invariant generator combining abstract
interpretation with separation logic ideas \cite{separation}. We integrated
everything into the Frama-C modular architecture, in order to compute proof
obligations from functional and cost invariants and to use automatic theorem
provers on them. This is an example of a new technique that is not currently
exploited in WCET analysis. It also shows how precise functional invariants
benefit the non-functional analysis too. Finally, we showed how to fully
automatically analyse the reaction time of Lustre nodes that are first compiled
to C using a standard Lustre compiler and then processed by a C compiler that
implements the labelling approach.

\emph{The CerCo compiler.} This is a compiler from a large subset of the C
language to 8051/8052 object code,
integrating the labelling approach and a static analyser for 8051 executables.
The latter can be implemented easily and does not require dependent costs
because the 8051 microprocessor is a very simple one, with constant-cost
instructions. It was chosen to separate the issue of exact propagation of the
cost model from the orthogonal problem of the static analysis of object code
that may require approximations or dependent costs. The compiler comes in
several versions: some are prototypes implemented directly in OCaml, and they
implement both the basic and dependent labelling approaches; the final version
is extracted from a Matita certification and at the moment implements only the
basic approach.

\emph{A formal cost certification of the CerCo compiler.} We implemented the
CerCo compiler in the interactive theorem prover Matita and have formally
certified that the cost model induced on the source code correctly and precisely
predicts the object code behavior. We actually induce two cost models, one for
time and one for stack space used. We show the correctness of the prediction
only for those programs that do not exhaust the available machine stack space, a
property that, thanks to the stack cost model, we can statically analyse on the
source code using our Frama-C tool. The preservation of functional properties we
take as an assumption, not itself formally proved in CerCo. Other projects have
already certified the preservation of functional semantics in similar compilers,
and we have not attempted to directly repeat that work. In order to complete the
proof for non-functional properties, we have introduced a new semantics for
programming languages based on a new kind of structured observables, with the
related notions of forward similarity and the study of the intensional
consequences of forward similarity. We have also introduced a unified
representation for back-end intermediate languages that was exploited to provide
a uniform proof of forward similarity.

\subsection{The (basic) labelling approach.}
\paragraph{Problem statement.} Given a source program $P$, we want to obtain an
instrumented source program $P'$, written in the same programming language, and
the object code $O$ such that: 1) $P'$ is obtained by inserting into $P$ some
additional instructions to update global cost information like the amount of
time spent during execution or the maximal stack space required; 2) $P$ and $P'$
must have the same functional behavior, i.e., they must produce the same output
and intermediate observables; 3) $P$ and $O$ must have the same functional
behavior; 4) after execution and in interesting points during execution, the
cost information computed by $P'$ must be an upper bound of the one spent by $O$
to perform the corresponding operations (soundness property); 5) the difference
between the costs computed by $P'$ and the execution costs of $O$ must be
bounded by a program dependent constant (precision property).

\paragraph{The labelling software components.} We solve the problem in four
stages \cite{labelling}, implemented by four software components that are used
in sequence.

The first component labels the source program $P$ by injecting label emission
statements in appropriate positions to mark the beginning of basic blocks. The
set of labels together with their positions is called the labelling. The syntax
and semantics of the source programming language are augmented with label
emission statements. The statement “EMIT $l$” behaves like a NOP instruction
that does not affect the program state or control flow, but it changes the
semantics by making the label $l$ observable. Therefore the observables of a run
of a program become a stream of label emissions $l_1 \ldots l_n$, called the
program trace. We clarify the conditions that the labelling must respect later.

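For illustration, here is a sketch of a labelled version of a small C function,
writing the emission statement as a hypothetical \texttt{EMIT} primitive (the
concrete syntax used by our tools may differ):
\begin{lstlisting}
int f(int x) {
  EMIT(L1);     /* first basic block of the function */
  int r = 0;
  while (x > 0) {
    EMIT(L2);   /* breaks the loop: emitted at every iteration */
    r = r + x;
    x = x - 1;
  }
  EMIT(L3);     /* basic block following the loop */
  return r;
}
\end{lstlisting}
A run of \texttt{f(2)} produces the trace \texttt{L1 L2 L2 L3}.
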
The second component is a labelling preserving compiler. It can be obtained from
an existing compiler by adding label emission statements to every intermediate
language and by propagating label emission statements during compilation. The
compiler is correct if it preserves both the functional behavior of the program
and the generated traces. We may also ask that the function that erases the cost
emission statements commutes with compilation. This optional property guarantees
that the labelling does not interfere with the original compiler behavior. A
further set of requirements will be added later.

The third component is a static object code analyser. It takes as input the
object code augmented with label emission statements and computes, for every
such statement, its scope. The scope of a label emission statement is the tree
of instructions that may be executed after the statement and before a new label
emission statement is found. It is a tree and not a sequence because the scope
may contain a branching statement. In order to guarantee that such a finite tree
exists, the object code must not contain any loop that is not broken by a label
emission statement. This is the first requirement of a sound labelling. The
analyser fails if the labelling is unsound. For each scope, the analyser
computes an upper bound on the execution time required by the scope, using the
maximum of the costs of the two branches in case of a conditional statement.
Finally, the analyser computes the cost of a label by taking the maximum of the
costs of the scopes of every statement that emits that label.

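The computation performed on each scope can be rendered as a simple recursion
over the scope tree. The following is a minimal sketch in C, in which the
\texttt{scope\_node} representation and the constant per-instruction costs are
our own illustrative assumptions:
\begin{lstlisting}
/* A node of the scope tree: one object code instruction with its
   cost in clock cycles and at most two successors (two in case of
   a conditional branch, none when a new label emission is found). */
typedef struct scope_node {
  unsigned cost;
  struct scope_node *succ[2];
} scope_node;

/* Upper bound on the execution time of a scope: the cost of the
   instruction plus the maximum over the successors. Termination is
   guaranteed because a sound labelling breaks every loop. */
unsigned scope_cost(const scope_node *n) {
  if (n == 0) return 0;
  unsigned c0 = scope_cost(n->succ[0]);
  unsigned c1 = scope_cost(n->succ[1]);
  return n->cost + (c0 > c1 ? c0 : c1);
}
\end{lstlisting}
The cost assigned to a label is then the maximum of \texttt{scope\_cost} over
the scopes of all the statements that emit that label.
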
The fourth and last component takes as input the cost model computed at step 3
and the labelled code computed at step 1. It outputs a source program obtained
by replacing each label emission statement with a statement that increments the
global cost variable by the cost associated to the label by the cost model.
The obtained source code is the instrumented source code.

\paragraph{Correctness.} Requirements 1, 2 and 3 of the problem statement
obviously hold, with 2 and 3 being a consequence of the definition of a correct
labelling preserving compiler. It is also obvious that the value of the global
cost variable of an instrumented source code is at any time equal to the sum of
the costs of the labels emitted by the corresponding labelled code. Moreover,
because the compiler preserves all traces, the sums of the costs of the labels
emitted in the source and target labelled code are the same. Therefore, to
satisfy the fourth requirement, we need to guarantee that the time taken to
execute the object code is equal to the sum of the costs of the labels emitted
by the object code. We collect all the necessary conditions for this to happen
in the definition of a sound labelling: a) all loops must be broken by a cost
emission statement; b) all program instructions must be in the scope of some
cost emission statement. To satisfy also the fifth requirement, additional
requirements must be imposed on the object code labelling to avoid all uses of
the maximum in the cost computation: the labelling is precise if every label is
emitted at most once and both branches of a conditional jump start with a label
emission statement.

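As a small illustration of the precision requirement, consider a conditional
whose branches have different costs, labelled with the hypothetical labels
\texttt{Lt} and \texttt{Lf}; the pattern matches the instrumented branches of
\autoref{itest1}, where the two branches cost 144 and 122 cycles:
\begin{lstlisting}
if (*p <= threshold) {
  EMIT(Lt);   /* the 144 cycle branch is paid exactly */
  found++;
} else {
  EMIT(Lf);   /* the 122 cycle branch is paid exactly */
}
\end{lstlisting}
With a single label emitted before the conditional, the analyser would have to
charge the maximum of the two branch costs to every execution; with one label
per branch, no use of the maximum is needed.
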
The correctness and precision of the labelling approach only rely on the
correctness and precision of the object code labelling. The simplest, but not
necessary, way to achieve them is to impose the correctness and precision
requirements also on the initial labelling produced by the first software
component, and to ask the labelling preserving compiler to preserve these
properties too. The latter requirement imposes serious limitations on the
compilation strategy and optimizations: the compiler may not duplicate any code
that contains label emission statements, like loop bodies. Therefore several
loop optimizations like peeling or unrolling are prevented. Moreover, precision
of the object code labelling is not sufficient per se to obtain global
precision: we also implicitly assumed that the static analysis is able to
associate a precise constant cost to every instruction. This is not possible in
the presence of stateful hardware whose state influences the cost of operations,
like pipelines and caches. In the next section we will see an extension of the
basic labelling approach that covers this situation.

The labelling approach described in this section can be applied equally well,
with minor modifications, to imperative and functional languages
\cite{labelling2}. We have tested it on a simple imperative language without
functions (a While language), on a subset of C and on two compilation chains for
a purely functional higher order language. The two main changes required to deal
with functional languages are: 1) because global variables and updates are not
available, the instrumentation phase produces monadic code to “update” the
global costs; 2) the requirements for a sound and precise labelling of the
source code must be changed when the compilation is based on CPS translations.
In particular, we need to introduce both labels emitted before a statement is
executed and labels emitted after a statement is executed. The latter capture
code that is inserted by the CPS translation and that would otherwise escape all
label scopes.

Phases 1, 2 and 3 can be applied as well to logic languages (e.g. Prolog).
However, the instrumentation phase cannot: in standard Prolog there is no notion
of a (global) variable whose state is not retracted during backtracking.
Therefore, the cost of executing computations that are later backtracked would
not be counted correctly. Any extension of logic languages with
non-backtrackable state should support the labelling approach.

\subsection{Dependent labelling.}
The core idea of the basic labelling approach is to establish a tight connection
between basic blocks executed in the source and target languages. Once the
connection is established, any cost model computed on the object code can be
transferred to the source code, without affecting the code of the compiler or
its proof. In particular, it is immediate that we can also transport cost models
that associate to each label a function from hardware states to natural numbers.
However, a problem arises during the instrumentation phase that replaces cost
emission statements with increments of global cost variables. The global cost
variable must be incremented with the result of applying the function associated
to the label to the hardware state at the time of execution of the block.
The hardware state comprises both the “functional” state that affects the
computation (the values of the registers and memory) and the “non-functional”
state that does not (the pipeline and cache contents, for example). The former
is in correspondence with the source code state, but reconstructing the
correspondence may be hard, and lifting the cost model to work on the source
code state is likely to produce cost expressions that are too hard to reason
about. Luckily enough, in all modern architectures the cost of executing single
instructions is either independent of the functional state, or the jitter --- the
difference between the worst and best case execution times --- is small enough
to be bounded without losing too much precision. Therefore we can concentrate
on dependencies over the “non-functional” parts of the state only.

The non-functional state has no correspondence in the high level state and does
not influence the functional properties. What can be done is to expose the
non-functional state in the source code. We present here only the basic
intuition in a simplified form: the technical details that handle the general
case are more complex and can be found in the CerCo deliverables. We add
to the source code an additional global variable that represents the
non-functional state, and another one that remembers the last labels emitted.
The state variable must be updated at every label emission statement, using an
update function which is also computed during static analysis. The update
function associates to each label a function from the recently emitted labels
and the old state to the new state. It is computed by composing the semantics of
every instruction in a basic block and restricting it to the non-functional part
of the state.

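In an instrumented program, the result takes roughly the following shape; the
names (\texttt{\_\_state}, \texttt{\_\_cost\_of\_L2}, \texttt{\_\_update\_L2})
and the encoding of the non-functional state as an integer are illustrative
assumptions, not the actual output of the compiler, and for simplicity we omit
the variable recording the recently emitted labels:
\begin{lstlisting}
int __cost  = 0;
int __state = 0;  /* summary of the non-functional hardware state */

extern int __cost_of_L2(int);  /* cost table from static analysis   */
extern int __update_L2(int);   /* state update from static analysis */

/* What the instrumentation inserts at the emission point of the
   (hypothetical) label L2: */
void __emit_L2(void) {
  __cost  += __cost_of_L2(__state); /* parametric cost of the block */
  __state  = __update_L2(__state);  /* state after the block        */
}
\end{lstlisting}
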
Not all the details of the non-functional state need to be exposed, and the
technique works better when the part of the state that is required can be
summarized in a simple data structure. For example, to handle simple but
realistic pipelines it is sufficient to remember a short integer that encodes
the position of bubbles (stuck instructions) in the pipeline. In any case, the
user does not need to understand the meaning of the state to reason about the
properties of the program. Moreover, at any moment the user, or the invariant
generator tools that analyse the instrumented source code produced by the
compiler, can decide to trade the precision of the analysis for simplicity by
approximating the parametric cost with safe non-parametric bounds.
Interestingly, the functional analysis of the code can determine which blocks
are executed more frequently, in order to approximate more aggressively the
ones that are executed less.

Dependent labelling can also be applied to allow the compiler to duplicate
blocks that contain labels (e.g. in loop optimizations). The effect is to assign
a different cost to the different occurrences of a duplicated label. For
example, loop peeling turns a loop into the concatenation of a copy of the loop
body (that executes the first iteration) with the conditional execution of the
loop (for the successive iterations). Because of further optimizations, the two
copies of the loop will be compiled differently, with the first body usually
taking more time.

By introducing a variable that keeps track of the iteration number, we can
associate to the label a cost that is a function of the iteration number. The
same technique works for loop unrolling without modifications: the function will
assign one cost to the even iterations and another cost to the odd ones. The
actual work to be done consists in introducing into the source code, for each
loop, a variable that counts the number of iterations. The loop optimization
code that duplicates the loop bodies must also modify the code to correctly
propagate the update of the iteration numbers. The technical details are more
complex and can be found in the CerCo reports and publications. The
implementation, however, is quite simple and the changes to a loop optimizing
compiler are minimal \cite{paolo}.

\subsection{Techniques to exploit the induced cost model.}
We review the cost synthesis techniques developed in the project.
The starting hypothesis is that we have a certified methodology to annotate
blocks in the source code with constants which provide a sound and possibly
precise upper bound on the cost of executing the blocks after compilation to
object code.

The principle that we have followed in designing the cost synthesis tools is
that the synthetic bounds should be expressed and proved within a general
purpose tool built to reason on the source code. In particular, we rely on the
Frama-C tool to reason on C code and on the Coq proof-assistant to reason on
higher-order functional programs.

This principle entails that: 1) the inferred synthetic bounds are correct as
long as the general purpose tool is; 2) there is no limitation on the class of
programs that can be handled, as long as the user is willing to carry out an
interactive proof.

Of course, automation is desirable whenever possible. Within this framework,
automation means writing programs that give hints to the general purpose tool.
These hints may take the form, say, of loop invariants/variants, of predicates
describing the structure of the heap, or of types in a light logic. If these
hints are correct and sufficiently precise, the general purpose tool will
produce a proof automatically; otherwise, user interaction is required.

\paragraph{The Cost plug-in and its application to the Lustre compiler}
Frama-C \cite{framac} is a set of analysers for C programs with a
specification language called ACSL. New analyses can be dynamically added
through a plug-in system. For instance, the Jessie plug-in allows deductive
verification of C programs with respect to their specification in ACSL, with
various provers as back-end tools.
We developed the CerCo Cost plug-in for the Frama-C platform as a proof of
concept of an automatic environment exploiting the cost annotations produced by
the CerCo compiler. It consists of an OCaml program which, to a first
approximation, takes the following actions: (1) it receives as input a C
program; (2) it applies the CerCo compiler to produce a related C program with
cost annotations; (3) it applies some heuristics to produce a tentative bound
on the cost of executing the C functions of the program as a function of the
values of their parameters; (4) the user can then call the Jessie plug-in to
discharge the related proof obligations.
In the following we elaborate on the soundness of the framework and the
experiments we performed with the Cost tool on the C programs produced by a
Lustre compiler.

\paragraph{Soundness} The soundness of the whole framework depends on the cost
annotations added by the CerCo compiler, the synthetic costs produced by the
Cost plug-in, the verification conditions (VCs) generated by Jessie, and the
external provers discharging the VCs. Since the synthetic costs are in ACSL
format, Jessie can be used to verify them. Thus, even if the added synthetic
costs are incorrect (relative to the cost annotations), the process as a whole
is still correct: indeed, Jessie will not validate incorrect costs and no
conclusion can be drawn about the WCET of the program in this case. In other
words, the soundness does not really depend on the action of the Cost plug-in,
which can in principle produce any synthetic cost. However, in order to be able
to actually prove a WCET of a C function, we need to add correct annotations in
such a way that Jessie and subsequent automatic provers have enough information
to deduce their validity. In practice this is not straightforward, even for very
simple programs composed of branching and assignments (no loops and no
recursion), because a fine analysis of the VCs associated with branching may
lead to a complexity blow up.
\paragraph{Experience with Lustre} Lustre is a data-flow language for
programming synchronous systems, and the language comes with a compiler to C. We
designed a wrapper for supporting Lustre files.
The C function produced by the compiler implements the step function of the
synchronous system, and computing the WCET of the function amounts to obtaining
a bound on the reaction time of the system. We tested the Cost plug-in and the
Lustre wrapper on the C programs generated by the Lustre compiler. For programs
consisting of a few hundred lines of code, the Cost plug-in computes a WCET and
Alt-Ergo is able to discharge all VCs automatically.

\paragraph{Handling C programs with simple loops}
The cost annotations added by the CerCo compiler take the form of C instructions
that update a fresh global variable, called the cost variable, by a constant.
Synthesizing a WCET of a C function thus consists in statically resolving an
upper bound on the difference between the values of the cost variable before and
after the execution of the function, i.e. finding the instructions in the
function that update the cost variable and establishing the number of times they
are passed through during the flow of execution. In order to perform the
analysis, the plug-in makes the following assumptions on the programs:
\begin{enumerate}
\item no recursive functions;
\item every loop is annotated with a variant (the variants of ‘for’ loops are
automatically inferred).
\end{enumerate}

The plug-in proceeds as follows.
First the call graph of the program is computed.
Then the cost of each function is computed by traversing its
control flow graph. If the function f calls the function g, then the function g
is processed before the function f. The cost at a node is the maximum of the
costs of the successors.
In the case of a loop with a body that has a constant cost for every step of the
loop, the cost is the product of the cost of the body and of the variant taken
at the start of the loop.
In the case of a loop with a body whose cost depends on the values of some free
variables, a fresh logic function f is introduced to represent the cost of the
loop in the logic assertions. This logic function takes the variant as a first
parameter. The other parameters of f are the free variables of the body of the
loop. An axiom is added to account for the fact that the cost is accumulated at
each step of the loop.
The cost of the function is directly added as a post-condition of the function.

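For instance, for a loop whose body has a constant cost, the synthesized
annotations might look as follows; this is a sketch in ACSL, with the constant
cost 100 and the function \texttt{f} invented for the example:
\begin{lstlisting}
/*@ requires n >= 0;
    ensures __cost <= \old(__cost) + 100*n; */
void f(int n) {
  int i;
  /*@ loop invariant 0 <= i <= n;
      loop invariant __cost <= \at(__cost,Pre) + 100*i;
      loop variant n-i; */
  for (i = 0; i < n; i++) {
    __cost_incr(100);  /* constant cost of the loop body */
  }
}
\end{lstlisting}
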
The user can influence the annotation by different means:
by using more precise variants, or
by annotating functions with a cost specification; the plug-in will then use
this cost for the function instead of computing it.
\paragraph{C programs with pointers}
When it comes to verifying programs involving pointer-based data structures,
such as linked lists, trees, or graphs, the use of traditional first-order logic
to specify, and of SMT solvers to verify, shows some limitations. Separation
logic \cite{separation} is then an elegant alternative. It is a program logic
with a new notion of conjunction to express spatial heap separation. Bobot has
recently introduced into the Jessie plug-in automatically generated separation
predicates that make it possible to simulate separation logic reasoning in a
traditional verification framework, where the specification language, the
verification condition generator, and the theorem provers were not designed
with separation logic in mind. CerCo's plug-in can exploit the separation
predicates to automatically reason on the cost of execution of simple heap
manipulation programs such as an in-place list reversal.

\subsection{The CerCo Compiler}
In CerCo we have developed a number of cost preserving compilers based
on the labelling approach. Excluding an initial certified compiler for a While
language, all the remaining compilers target realistic source languages --- a
pure higher order functional language and a large subset of C with pointers,
gotos and all data structures --- and real world target processors --- MIPS and
the Intel 8051/8052 processor family. Moreover, they achieve a level of
optimization that ranges from moderate (comparable to gcc level 1) to
intermediate (including loop peeling and unrolling, hoisting and late constant
propagation). The so called Trusted CerCo Compiler is the only one that was
implemented in the interactive theorem prover Matita and whose costs were
certified. The distributed code is obtained by extracting OCaml code from the
Matita implementation. In the rest of this section we will focus only on the
Trusted CerCo Compiler, which targets only the C language and the 8051/8052
family, and which does not yet implement the advanced optimizations. Its user
interface, however, is the same as that of the other versions, so that safety
can be traded for additional performance. In particular, the Frama-C CerCo
plug-in works without recompilation with all compilers.

The (trusted) CerCo compiler implements the following optimizations: cast
simplification, constant propagation in expressions, liveness analysis driven
spilling of registers, dead code elimination, branch displacement and tunneling.
The two latter optimizations are performed by our optimizing assembler
\cite{correctness}. The back-end of the compiler works on three-address
instructions, preferred to static single assignment code for the simplicity of
the formal certification.

The CerCo compiler is loosely based on the CompCert compiler \cite{compcert}, a
recently developed certified compiler from C to the PowerPC, ARM and x86
microprocessors. Contrary to CompCert, both the CerCo code and its
certification are open source. Some data structures and language definitions for
the front-end are directly taken from CompCert, while the back-end is a redesign
of a compiler from Pascal to MIPS used by François Pottier for a course at the
École Polytechnique.

The main peculiarities of the CerCo compiler are:
\begin{enumerate}
\item all of our intermediate languages include label emitting instructions to
implement the labelling approach, and the compiler preserves execution traces;
\item compilers traditionally target an assembly language with additional
macro-instructions to be expanded before assembly; for CerCo we need to go all
the way down to object code in order to perform the static analysis. Therefore
we also integrated an optimizing assembler and a static analyser;
\item to avoid implementing a linker and a loader, we do not support separate
compilation and external calls. Adding a linker and a loader is a process
transparent to the labelling approach and should create no unexpected problems;
\item we target an 8-bit processor. Targeting an 8-bit processor requires
several changes and increases the code size, but it is not fundamentally more
complex. The proof of correctness, however, becomes much harder;
\item we target a microprocessor that has a non-uniform memory model, which is
still often the case for microprocessors used in embedded systems and which is
becoming common again in multi-core processors. Therefore the compiler has to
keep track of data and must move it between memory regions in the proper way.
Moreover, the size of pointers to different regions is not uniform. An
additional difficulty was that the space available for the stack in the internal
memory of the 8051 is tiny, allowing only a small number of nested calls. To
support full recursion, in order to test the CerCo tools also on recursive
programs, the compiler manually manages a stack in external memory.
\end{enumerate}

\section{A formal certification of the CerCo compiler}
The Trusted CerCo Compiler has been implemented and certified using the
interactive theorem prover Matita. The details of the proof techniques employed
and the proof sketch can be found in the CerCo deliverables and papers.
In this section we will only hint at the correctness statement, which turned
out to be more complex than we expected at the beginning.

941\paragraph{The statement}
942Real time programs are often reactive programs that loop forever responding to
943events (inputs) by performing some computation followed by some action (output)
944and the return to the initial state. For looping programs the overall execution
945time does not make sense. The same happens for reactive programs that spend an
946unpredictable amount of time in I/O. What is interesting is the reaction time
947that measure the time spent between I/O events. Moreover, we are interested in
948predicting and ruling out programs that crash running out of space on a certain
949input.
Therefore we need a statement that talks about sub-runs of a program. The most
natural statement is that the time predicted on the source code and the time
spent on the object code by two corresponding sub-runs are the same. To make
this statement formal we must identify the corresponding sub-runs and single
out those that are meaningful.
The solution we found is based on the notion of measurability. We say that a
run has a \emph{measurable sub-run} when: neither the prefix of the sub-run
nor the sub-run itself exhausts the stack space; the number of function calls
and returns in the sub-run is balanced; the sub-run performs no I/O; and the
sub-run starts with a label emission statement and ends with a return or
another label emission statement. Stack usage is estimated using the stack
usage model computed by the compiler.
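
As a concrete (and hedged) illustration, consider the reactive loop below,
where the I/O primitives \lstinline'read_input' and \lstinline'write_output'
are hypothetical. The execution of the handler between the two (informally
indicated) label emissions is a measurable sub-run.
\begin{lstlisting}
extern int  read_input(void);    /* hypothetical I/O primitives */
extern void write_output(int);

int handle(int x) {              /* no I/O, balanced call/return: */
  return 2 * x + 1;              /* its run can be measured       */
}

int main(void) {
  for (;;) {
    int x = read_input();        /* I/O: allowed in the prefix    */
    /* label L: from here to label L' is a measurable sub-run:
       it starts at a label emission, performs no I/O, its calls
       and returns balance, and the stack is not exhausted.       */
    int r = handle(x);
    /* label L' */
    write_output(r);             /* I/O: outside the sub-run      */
  }
}
\end{lstlisting}
The reaction time of the loop body is then the cost predicted on the source
code for the instructions between the two labels.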
962
The statement that we formally proved is: for each C run with a measurable
sub-run, there exists an object code run with a sub-run such that the
observables of the corresponding prefixes and sub-runs coincide, and the time
spent by the object code in the sub-run equals the time predicted on the
source code using the time cost model generated by the compiler.
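Schematically, and eliding the side conditions of the actual Matita statement,
the theorem can be rendered as follows, where $p \cdot m$ is a source run
split into a prefix $p$ and a measurable sub-run $m$, $p' \cdot m'$ is the
corresponding object code run, $\mathit{obs}$ gives the observables of a run
fragment, $\mathit{clock}$ the cycles actually spent, and $\kappa$ the cost
predicted on the source using the compiler-generated model (the notation is
ours, not the verbatim formalisation):
\[
\forall\, p \cdot m.\;\;
\exists\, p' \cdot m'.\;\;
\mathit{obs}(p) = \mathit{obs}(p')
\;\wedge\;
\mathit{obs}(m) = \mathit{obs}(m')
\;\wedge\;
\mathit{clock}(m') = \kappa(m)
\]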
We briefly discuss the constraints for measurability. Not exhausting the stack
space is a clear meaningfulness requirement, because source programs never
crash for lack of space while object code programs do. The balancing of
function calls and returns is a requirement for precision: the labelling
approach allows the scope of a label emission statement to extend past
function calls in order to minimize the number of labels. Therefore a label
pays for all the instructions in a block, excluding those executed in nested
function calls. If the number of calls and returns is unbalanced, there is a
call we have not returned to, which could be followed by additional
instructions whose cost has already been taken into account. To make the
statement true (but less precise) in this situation, we could only say that
the cost predicted on the source code is a safe bound, not that it is exact.
The last condition, on the entry and exit points of a run, identifies sub-runs
whose code corresponds to a sequence of blocks that we can measure precisely.
Any other choice would start or end the run in the middle of a block, and we
would again be forced to weaken the statement, taking as a bound the cost
obtained by also counting the instructions that precede the starting point, or
follow the final point, within their blocks.
I/O operations may be performed in the prefix of the run, but not in the
measurable sub-run. Therefore we prove that we can predict reaction times, but
not I/O times, as it should be.
988
989\section{Future work}
990
991All the CerCo software and deliverables can be found on-line at the
992address \url{http://cerco.cs.unibo.it}.
993
The results obtained so far are encouraging and provide evidence that
it is possible to perform static time and space analysis at the source level
without losing accuracy, reducing the trusted code base and reconciling the
study of functional and non-functional properties of programs. The
techniques introduced seem to be scalable, cover both imperative and
functional languages and are compatible with every compiler optimization
we have considered so far.
1001
To prove that compilers can keep track of optimizations
and induce a precise cost model on the source code, we targeted a simple
architecture that admits a cost model independent of the execution history.
The most important future work is dealing with hardware architectures
characterized by history-dependent stateful components, like caches and
pipelines. The main issue consists in assigning a parametric, dependent cost
to basic blocks that can later be transferred by the labelling approach to
the source code and represented in a meaningful way to the user. The dependent
labelling approach that we have studied seems a promising tool to achieve
this goal, but the cost model generated for a realistic processor could be too
large and complex to be exposed in the source code. Further study is required
to evaluate the technique on a realistic processor and to introduce early
approximations of the cost model to make the technique feasible.
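
To give the flavour of dependent labelling (cf.~\cite{paolo}), the hedged
sketch below shows a source instrumentation in which the cost of a block
depends on the loop iteration index, as could happen with a cache miss on the
first iteration; the names and cycle counts are illustrative assumptions.
\begin{lstlisting}
unsigned long __cost = 0;

/* Indexed label: the cost of the block depends on the
   iteration counter i (e.g. cold cache when i == 0). */
unsigned long __block_cost(unsigned long i) {
  return (i == 0) ? 40 : 12;   /* miss vs. hit (assumed) */
}

void sum(int *a, int n, int *out) {
  int s = 0;
  int i;
  for (i = 0; i < n; i++) {
    __cost += __block_cost(i); /* dependent cost annotation */
    s += a[i];
  }
  *out = s;
}
\end{lstlisting}
Summing the annotation symbolically yields a cost of $40 + 12(n-1)$ cycles for
$n \ge 1$, a bound an invariant generator could attach to the loop.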
1015
Further future work includes improving the cost invariant
generator algorithms and the coverage of compiler optimizations, combining
the labelling approach with the type \& effect discipline \cite{typeffects}
to handle languages with implicit memory management, and experimenting with
our tools in early development phases. Larger use cases are also necessary
to evaluate the CerCo prototype on industrial-size programs.
1022
1023\begin{thebibliography}{}
1024
\bibitem{cerco} \textbf{Certified Complexity}, R. Amadio, A. Asperti, N. Ayache,
B. Campbell, D. Mulligan, R. Pollack, Y. Régis-Gianas, C. Sacerdoti Coen, I.
Stark, in Procedia Computer Science, Volume 7, 2011, Proceedings of the 2nd
European Future Technologies Conference and Exhibition 2011 (FET 11), 175-177.

\bibitem{labelling} \textbf{Certifying and Reasoning on Cost Annotations in C
Programs}, N. Ayache, R.M. Amadio, Y. Régis-Gianas, in Proc. FMICS, Springer
LNCS 7437: 32-46, 2012, DOI:10.1007/978-3-642-32469-7\_3.
1034
1035\bibitem{paolo} \textbf{Indexed Labels for Loop Iteration Dependent Costs}, P.
1036Tranquilli, in Proceedings of the 11th International Workshop on Quantitative
1037Aspects of Programming Languages and Systems (QAPL 2013), Rome, 23rd-24th March
10382013, Electronic Proceedings in Theoretical Computer Science, to appear in 2013.
1039
1040\bibitem{embounded} \textbf{The EmBounded project (project paper)}, K. Hammond,
1041R. Dyckhoff, C. Ferdinand, R. Heckmann, M. Hofmann, H. Loidl, G. Michaelson, J.
1042Serot, A. Wallace, in Trends in Functional Programming, Volume 6, Intellect
1043Press, 2006.
1044
\bibitem{proartis} \textbf{PROARTIS: Probabilistically Analysable Real-Time
Systems}, F.J. Cazorla, E. Quiñones, T. Vardanega, L. Cucu, B. Triquet, G.
Bernat, E. Berger, J. Abella, F. Wartel, M. Houston, et al., in ACM Transactions
on Embedded Computing Systems, 2012.
1049
\bibitem{stateart} \textbf{The worst-case execution-time problem: overview of
methods and survey of tools}, R. Wilhelm et al., in ACM Transactions on Embedded
Computing Systems, 7:1–53, May 2008.
1054
1055%\bibitem{proartis2} \textbf{A Cache Design for Probabilistic Real-Time
1056% Systems}, L. Kosmidis, J. Abella, E. Quinones, and F. Cazorla, in Design,
1057% Automation, and Test in Europe (DATE), Grenoble, France, 03/2013.
1058
\bibitem{framac} \textbf{Frama-C user manual}, L. Correnson, P. Cuoq, F.
Kirchner, V. Prevosto, A. Puccetti, J. Signoles, B. Yakobowski, CEA-LIST,
Software Safety Laboratory, Saclay, F-91191,
\url{http://frama-c.com/}.

\bibitem{compcert} \textbf{Formal verification of a realistic compiler}, X.
Leroy, in Commun. ACM 52(7), 107–115, 2009.
1066
\bibitem{lustre} \textbf{LUSTRE: a declarative language for real-time
programming}, P. Caspi, D. Pilaud, N. Halbwachs, J. A. Plaice, in Proceedings of
the 14th ACM SIGACT-SIGPLAN Symposium on Principles of Programming Languages,
ACM, 1987.

\bibitem{separation} \textbf{Intuitionistic reasoning about shared mutable data
structure}, J. C. Reynolds, in Millennial Perspectives in Computer Science,
pages 303–321, Houndsmill, Hampshire, 2000, Palgrave.
1077
\bibitem{typeffects} \textbf{The Type and Effect Discipline}, J.-P. Talpin and
P. Jouvelot, in Proceedings of the Seventh Annual Symposium on Logic in Computer
Science (LICS '92), Santa Cruz, California, USA, June 22-25, 1992, IEEE Computer
Society, 1992.

\bibitem{labelling2} \textbf{Certifying and reasoning on cost annotations of
functional programs}, R.M. Amadio, Y. Régis-Gianas, in Proceedings of the Second
International Conference on Foundational and Practical Aspects of Resource
Analysis (FOPARA 2011), Springer LNCS, 2011.

\bibitem{correctness} \textbf{On the correctness of an optimising assembler for
the Intel MCS-51 microprocessor}, D. P. Mulligan and C. Sacerdoti Coen, in
Proceedings of the Second International Conference on Certified Programs and
Proofs, Springer-Verlag, 2012.

\bibitem{survey} \textbf{A Survey of Static Program Analysis Techniques}, W.
W\"ogerer, Technical report, Technische Universit\"at Wien, 2005.
1097
1098\end{thebibliography}
1099
1100
1101%\bibliography{fopara13.bib}
1102
1103\end{document}