Fix Pierre's feedback, switched to linear CCS

This commit is contained in:
Théophile Bastian 2016-08-12 16:29:29 +01:00
parent 2ee16840bf
commit 579bcc3451
4 changed files with 339 additions and 244 deletions

View file

@ -2,6 +2,9 @@ TEX=report
DOTFIGURES=coffeemachine.game.tex coffeemachine.strat.tex DOTFIGURES=coffeemachine.game.tex coffeemachine.strat.tex
REMOTE=www.tobast
REMOTEDIR=~/tobast.fr/public_html/l3/report.pdf
################################################### ###################################################
DOTFIGURES_PATH=$(patsubst %,_build/dot/%,$(DOTFIGURES)) DOTFIGURES_PATH=$(patsubst %,_build/dot/%,$(DOTFIGURES))
@ -12,6 +15,9 @@ all: $(TEX).tex $(DOTFIGURES_PATH)
pdflatex $(TEX) pdflatex $(TEX)
pdflatex $(TEX) pdflatex $(TEX)
upload: all
scp $(TEX).pdf $(REMOTE):$(REMOTEDIR)
_build: _build:
mkdir -p _build mkdir -p _build
mkdir -p _build/dot mkdir -p _build/dot

View file

@ -23,7 +23,7 @@
%%%%% Pierre's references %%%%% Pierre's references
@article{hyland2000pcf, @article{hyland2000pcf,
title={On full abstraction for PCF: I, II, and III}, title={{On full abstraction for PCF: I, II, and III}},
author={Hyland, J Martin E and Ong, C-HL}, author={Hyland, J Martin E and Ong, C-HL},
journal={Information and computation}, journal={Information and computation},
volume={163}, volume={163},

View file

@ -7,7 +7,7 @@
\usepackage{tikz} \usepackage{tikz}
\usepackage{relsize} \usepackage{relsize}
\usetikzlibrary{shapes,arrows} \usetikzlibrary{shapes,arrows,positioning}
\newcommand{\fname}[1]{\textsc{#1}} \newcommand{\fname}[1]{\textsc{#1}}
@ -25,6 +25,7 @@
\newcommand{\redarrow}[1]{\overset{#1}{\longrightarrow}} \newcommand{\redarrow}[1]{\overset{#1}{\longrightarrow}}
\newcommand{\config}{\mathscr{C}} \newcommand{\config}{\mathscr{C}}
\newcommand{\downclose}[1]{\left[#1\right]}
\newcommand{\strComp}{\odot} \newcommand{\strComp}{\odot}
\newcommand{\strInteract}{\ostar} \newcommand{\strInteract}{\ostar}
@ -37,10 +38,11 @@
% LCCS % LCCS
\newcommand{\linearmark}{$\mathcal{L}$}
\newcommand{\lcalc}{$\lambda$-calculus} \newcommand{\lcalc}{$\lambda$-calculus}
\newcommand{\lccs}{$\lambda$CCS} \newcommand{\lccs}{$\lambda$CCS}
\newcommand{\linccs}{CCS$_\linarrow$} \newcommand{\linccs}{{\linearmark}CCS}
\newcommand{\llccs}{$\lambda$CCS$_\linarrow$} \newcommand{\llccs}{$\lambda${\linearmark}CCS}
\newcommand{\proc}{\mathbb{P}} \newcommand{\proc}{\mathbb{P}}
\newcommand{\chan}{\mathbb{C}} \newcommand{\chan}{\mathbb{C}}

View file

@ -56,7 +56,7 @@ in notoriety with the development of techniques to capture imperative
programming languages constructions, among which references programming languages constructions, among which references
handling~\cite{abramsky1996linearity}, followed by higher-order handling~\cite{abramsky1996linearity}, followed by higher-order
references~\cite{abramsky1998references}, allowing to model languages with side references~\cite{abramsky1998references}, allowing to model languages with side
effects; or exception handling~\cite{laird2001exceptions}. Until then, the effects; or exception handling~\cite{laird2001exceptions}. Since then, the
field has been deeply explored, providing a wide range of such constructions in field has been deeply explored, providing a wide range of such constructions in
the literature. the literature.
@ -76,13 +76,14 @@ focus at some point on modelling concurrency. The problem was tackled by
\fname{Laird}~\cite{laird2001game}, introducing game semantics for a \lcalc{} \fname{Laird}~\cite{laird2001game}, introducing game semantics for a \lcalc{}
with a few additions, as well as a \emph{parallel execution} operator and with a few additions, as well as a \emph{parallel execution} operator and
communication on channels. It is often considered, though, that \fname{Ghica} communication on channels. It is often considered, though, that \fname{Ghica}
and \fname{Murawski}~\cite{ghica2004angelic} laid the first stone by defining and \fname{Murawski}~\cite{ghica2004angelic} really took the fundamental step
game semantics for a fine-grained concurrent language including parallel by defining game semantics for a fine-grained concurrent language including
execution of ``threads'' and low-level semaphores. parallel execution of ``threads'' and low-level semaphores --- a much more
realistic approach to the problem.
However, both of these constructions are based on \emph{interleavings}. That However, both of these constructions are based on \emph{interleavings}. That
is, they model programs on \emph{tree-like games}, games in which the moves is, they model programs on \emph{tree-like games}, games in which the moves
that a player is allowed to play at a given point is represented as a tree that a player is allowed to play at a given point are represented as a tree
(\eg, in a state $A$, Player can play the move $x$ by following an edge of the (\eg, in a state $A$, Player can play the move $x$ by following an edge of the
tree starting from $A$, thus reaching $B$ and allowing Opponent to play a given tree starting from $A$, thus reaching $B$ and allowing Opponent to play a given
set of moves --- the outgoing edges of $B$). The concurrency is then set of moves --- the outgoing edges of $B$). The concurrency is then
@ -91,31 +92,32 @@ order to reach a game tree in which every possible ``unordered'' (\ie, that is
not enclosed in any kind of synchronisation block, as with semaphores) not enclosed in any kind of synchronisation block, as with semaphores)
combination of moves is a valid path. combination of moves is a valid path.
However, these approaches often introduce non-determinism in the strategies: if However, this approach introduces non-determinism in the strategies: if two
two moves are available to a player, the model often states that she makes a moves are available to a player, the model states that they make a
non-deterministic uniform choice. Yet, one could reasonably argue that a non-deterministic uniform choice. Yet, one could reasonably argue that a
program should behave consistently with the environment, which would mean that program should behave consistently with the environment, which would mean that
the semantics of a program --- even a concurrent one --- should still be the semantics of a program --- even a concurrent one --- should still be
deterministic. This idea was explored outside of the game semantics context, deterministic. This idea was explored outside of the game semantics context,
for instance by~\cite{reynolds1978syntactic}, establishing a type-checking for instance by~\cite{reynolds1978syntactic}, establishing a type-checking
system to restrict concurrent programs to deterministic ones. Some recent work system to restrict concurrent programs to deterministic ones. Some recent work
makes use of linear logic~\cite{caires2010session} for that purpose as well. makes use of linear logic~\cite{caires2010session} for similar purposes as
Yet, these techniques do not adapt well to interleavings game semantics. well. Yet, the interleavings game semantics of these languages remains
non-deterministic.
\paragraph{The purpose of this internship} was to try to take a first step \paragraph{The purpose of this internship} was to try to take a first step
towards the reunification of those two approaches. For that purpose, my towards the reunification of those two developments. For that purpose, my
objective was to give a \emph{deterministic} game semantics to a linear objective was to give a \emph{deterministic} game semantics to a linear
lambda-calculus enriched with parallel and sequential execution operators, as lambda-calculus enriched with parallel and sequential execution operators, as
well as synchronization on channels. In order to model this, I used the well as synchronization on channels. In order to model this, I used the games
\emph{event structures} formalism, described later on and introduced as \emph{event structures} formalism, described later on and introduced
in~\cite{rideau2011concurrent} by S. \fname{Rideau} and G. \fname{Winskel}. in~\cite{rideau2011concurrent} by S. \fname{Rideau} and G. \fname{Winskel}.
Roughly, event structures represent a strategy as a \emph{partial order} on the Roughly, event structures represent a strategy as a \emph{partial order} on the
moves, stating that move $x$ can only be played after move $y$, which is more moves, stating that move $x$ can only be played after move $y$, which is more
flexible than tree-like game approaches. Although a full-abstraction result flexible than tree-like game approaches. Although a full-abstraction result
could not be reached --- but is not so far away ---, we~\qnote{or I?} have could not be reached --- but is not so far away ---, I have proved the
proved the \emph{adequacy} of the operational and denotational semantics, and \emph{adequacy} of the operational and denotational semantics, and have
have obtained an implementation of the (denotational) game semantics, that is, obtained an implementation of the (denotational) game semantics, that is, code
code that translates a term of the language into its corresponding strategy. that translates a term of the language into its corresponding strategy.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{A linear \lcalc{} with concurrency primitives: \llccs} \section{A linear \lcalc{} with concurrency primitives: \llccs}
@ -132,17 +134,12 @@ exactly once ---, partly to keep the model simple, partly to meet the
determinism requirements. determinism requirements.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{An affine variant of CCS} \subsection{A linear variant of CCS: \linccs}
The variant of CCS we chose to use has two base types: The variant of CCS we chose to use has two base types:
\emph{processes}~($\proc$) and \emph{channels}~($\chan$). It has two base \emph{processes}~($\proc$) and \emph{channels}~($\chan$). It has two base
processes: $0$ (failure) and $1$ (success), although a process can be processes, $0$ (failure) and $1$ (success), although a process can be
considered ``failed'' without reducing to $1$. considered ``failed'' without reducing to $0$ (in case of deadlock).
Although the \lcalc{} into which we will plug it is linear, it appeared
unavoidable to use an \emph{affine} CCS --- that is, each identifier may be
used \emph{at most} once instead of \emph{exactly} once --- to ease the
inductions over CCS terms.
\begin{figure}[h] \begin{figure}[h]
\begin{minipage}[t]{0.60\textwidth} \begin{minipage}[t]{0.60\textwidth}
@ -161,7 +158,7 @@ inductions over CCS terms.
\vert~&\chan & \text{(channel)} \vert~&\chan & \text{(channel)}
\end{align*} \end{align*}
\end{minipage} \end{minipage}
\caption{CCS terms and types}\label{fig:lccs:def} \caption{\linccs{} terms and types}\label{fig:lccs:def}
\end{figure} \end{figure}
The syntax is pretty straightforward to understand: $0$ and $1$ are base The syntax is pretty straightforward to understand: $0$ and $1$ are base
@ -171,17 +168,17 @@ operand is a channel); $(\nu a)$ creates a new channel $a$ on which two
processes can be synchronized. Here, the ``synchronization'' simply means that processes can be synchronized. Here, the ``synchronization'' simply means that
a call to the channel is blocking until its other end has been called as well. a call to the channel is blocking until its other end has been called as well.
The language is simply typed as in figure~\ref{fig:lccs:typing}, in which The language is simply typed as in figure~\ref{fig:lccs:typing}. Note that
$\Phi$ is an environment composed \emph{only} of channels, reflecting the binary operators split their environment between their two operands, ensuring
affine nature of the language. Note that binary operators split their that each identifier is used at most once, and that no rules (in particular the
environment between their two operands, ensuring that each identifier is used axiom rules) ``forget'' any part of the environment, ensuring that each
only once. identifier is used at least once.
\begin{figure}[h] \begin{figure}[h]
\begin{align*} \begin{align*}
\frac{~}{\Phi \vdash 0:\proc} & (\textit{Ax}_0) & \frac{~}{\,\vdash 0:\proc} & (\textit{Ax}_0) &
\frac{~}{\Phi \vdash 1:\proc} & (\textit{Ax}_1) & \frac{~}{\,\vdash 1:\proc} & (\textit{Ax}_1) &
\frac{~}{\Phi, t:A \vdash t:A} & (\textit{Ax}) & \frac{~}{t:A \vdash t:A} & (\textit{Ax}) &
\frac{\Gamma, a:\chan, \bar{a}:\chan \vdash P : \proc} \frac{\Gamma, a:\chan, \bar{a}:\chan \vdash P : \proc}
{\Gamma \vdash (\nu a) P : \proc} {\Gamma \vdash (\nu a) P : \proc}
& (\nu) & (\nu)
@ -196,7 +193,7 @@ only once.
{\Gamma,a:\chan \vdash a \cdot P: \proc} {\Gamma,a:\chan \vdash a \cdot P: \proc}
& (\cdot_\chan) & (\cdot_\chan)
\end{align*} \vspace{-1.5em} \end{align*} \vspace{-1.5em}
\caption{CCS typing rules}\label{fig:lccs:typing} \caption{\linccs{} typing rules}\label{fig:lccs:typing}
\end{figure} \end{figure}
We also equip this language with operational semantics, in the form of a We also equip this language with operational semantics, in the form of a
@ -208,9 +205,10 @@ where $a$ denotes a channel and $x$ denotes any possible label.
\frac{~}{a \cdot P \redarrow{a} P} & & \frac{~}{a \cdot P \redarrow{a} P} & &
\frac{~}{1 \parallel P \redarrow{\tau} P} & & \frac{~}{1 \parallel P \redarrow{\tau} P} & &
\frac{~}{1 \cdot P \redarrow{\tau} P} & & \frac{~}{1 \cdot P \redarrow{\tau} P} & &
\frac{~}{(\nu a) 1 \redarrow{\tau} 1} & & \frac{P \redarrow{\tau_c} Q}
{(\nu a) P \redarrow{\tau} Q} & (c \in \set{a,\bar{a}})&
\frac{P \redarrow{a} P'\quad Q \redarrow{\bar{a}} Q'} \frac{P \redarrow{a} P'\quad Q \redarrow{\bar{a}} Q'}
{P \parallel Q \redarrow{\tau} P' \parallel Q'} {P \parallel Q \redarrow{\tau_a} P' \parallel Q'}
\end{align*}\begin{align*} \end{align*}\begin{align*}
\frac{P \redarrow{x} P'} \frac{P \redarrow{x} P'}
{P \parallel Q \redarrow{x} P' \parallel Q} & & {P \parallel Q \redarrow{x} P' \parallel Q} & &
@ -219,22 +217,34 @@ where $a$ denotes a channel and $x$ denotes any possible label.
\frac{P \redarrow{x} P'} \frac{P \redarrow{x} P'}
{P \cdot Q \redarrow{x} P' \cdot Q} & & {P \cdot Q \redarrow{x} P' \cdot Q} & &
\frac{P \redarrow{x} P'} \frac{P \redarrow{x} P'}
{(\nu a)P \redarrow{x} (\nu a)P'} & (a \neq x) {(\nu a)P \redarrow{x} (\nu a)P'} & (a \not\in \set{x,\tau_a})
\end{align*} \end{align*}
\caption{CCS operational semantics}\label{fig:lccs:opsem} \caption{\linccs{} operational semantics}\label{fig:lccs:opsem}
\end{figure} \end{figure}
We consider that a term $P$ \emph{converges} whenever $P \redarrow{\tau}^\ast We consider that a term $P$ \emph{converges} whenever $P \redarrow{\tau}^\ast
1$, and we write $P \Downarrow$. 1$, and we write $P \Downarrow$.
The $\tau_a$ reduction scheme may sound a bit unusual. It is, however,
necessary. Consider the reduction of $(\nu a) (a \cdot 1 \parallel \bar{a}
\cdot 1)$: the inner term $\tau_a$-reduces to $1$, thus allowing the whole term
to reduce to $1$; but if we replaced that $\tau_a$ with a $\tau$, the whole
term would reduce to $(\nu a) 1$, which has no valid types since $a$ and
$\bar{a}$ are not consumed. Our semantics would then not satisfy subject
reduction.
Switching to an \emph{affine} \linccs{} while keeping it wrapped in a
\emph{linear} \lcalc{} was considered, but yielded far too many problems, while
switching to a fully-affine model would have modified the problem too deeply.
\subsection{Lifting to the higher order: linear \lcalc} \subsection{Lifting to the higher order: linear \lcalc}
In order to reach the studied language, \llccs, we have to lift up \lccs{} to a In order to reach the studied language, \llccs, we have to lift up \linccs{} to
\lcalc. To do so, we add to the language the constructions of a \lcalc. To do so, we add to the language the constructions of
figure~\ref{fig:llam:syntax}, which are basically the usual \lcalc{} figure~\ref{fig:llam:syntax}, which are basically the usual \lcalc{}
constructions slightly transformed to be linear (which is mostly reflected by constructions slightly transformed to be linear (which is mostly reflected by
the typing rules). In particular, there is no base type $\alpha$: the base the typing rules). In particular, the only base types are $\proc$ and
types are only $\proc$ and $\chan$. $\chan$.
\begin{figure}[h] \begin{figure}[h]
\begin{minipage}[t]{0.55\textwidth} \begin{minipage}[t]{0.55\textwidth}
@ -243,33 +253,31 @@ types are only $\proc$ and $\chan$.
t,u,\ldots ::=~&x \in \mathbb{V} & \text{(variable)}\\ t,u,\ldots ::=~&x \in \mathbb{V} & \text{(variable)}\\
\vert~&t~u & \text{(application)}\\ \vert~&t~u & \text{(application)}\\
\vert~&\lambda x^A \cdot t & \text{(abstraction)}\\ \vert~&\lambda x^A \cdot t & \text{(abstraction)}\\
\vert~&t \tens u & \text{(tensor)} \\ \vert~&\text{\linccs}\textit{ constructions} &
\vert~&\text{let }x,y=z\text{~in }t & \text{(tensor elimination)}
\end{align*} \end{align*}
\end{minipage} \hfill \begin{minipage}[t]{0.40\textwidth} \end{minipage} \hfill \begin{minipage}[t]{0.40\textwidth}
\begin{center}Types\end{center}\vspace{-1em} \begin{center}Types\end{center}\vspace{-1em}
\begin{align*} \begin{align*}
A,B,\ldots ::=~&A \linarrow B & \text{(linear arrow)}\\ A,B,\ldots ::=~&A \linarrow B & \text{(linear arrow)}\\
\vert~&A \Tens B & \text{(tensor)} \vert~&\proc~\vert~\chan & \text{(\linccs)}
\end{align*} \end{align*}
\end{minipage} \end{minipage}
\caption{Linear \lcalc{} terms and types}\label{fig:llam:syntax} \caption{Linear \lcalc{} terms and types}\label{fig:llam:syntax}
\end{figure} \end{figure}
To enforce the linearity, the only the typing rules of the usual \lcalc{} that To enforce the linearity, the only typing rules of the usual \lcalc{} that
have to be changed are those of figure~\ref{fig:llam:typing}. The usual rules have to be changed are the $(\textit{Ax})$ and $(\textit{App})$ presented in
for $\lambda$-abstraction and $\otimes$-elimination applies. figure~\ref{fig:llam:typing}. The $(\textit{Abs})$ rule is the usual one.
\begin{figure}[h] \begin{figure}[h]
\begin{align*} \begin{align*}
\frac{~}{x : A \vdash x : A} & (\textit{Ax}) & \frac{~}{x : A \vdash x : A} & (\textit{Ax}) &
\frac{\Gamma \vdash t : A \linarrow B \quad \Delta \vdash u : A} \frac{\Gamma \vdash t : A \linarrow B \quad \Delta \vdash u : A}
{\Gamma,\Delta \vdash t~u : B} & (\textit{App}) & {\Gamma,\Delta \vdash t~u : B} & (\textit{App}) &
\frac{\Gamma \vdash t : A \quad \Delta \vdash u : B} \frac{\Gamma, x : A \vdash t : B}
{\Gamma, \Delta \vdash t \tens u : A \Tens B} & (\tens_I) {\Gamma \vdash \lambda x^{A} \cdot t : A \linarrow B} & (Abs)
\end{align*} \end{align*}
\caption{Linear \lcalc{} typing rules (eluding the usual \caption{Linear \lcalc{} typing rules}\label{fig:llam:typing}
ones)}\label{fig:llam:typing}
\end{figure} \end{figure}
The linearity is here guaranteed: in the (\textit{Ax}) rule, the environment The linearity is here guaranteed: in the (\textit{Ax}) rule, the environment
@ -320,8 +328,8 @@ game tree
This can of course be used to describe much larger games, and is often useful This can of course be used to describe much larger games, and is often useful
to reason concurrently, since the causal histories appear clearly: the possible to reason concurrently, since the causal histories appear clearly: the possible
states of the game can be read easily by concatenating the events found along states of the game can be read easily by concatenating the events found along a
a path from the root of the tree. path from the root of the tree.
But it also has the major drawback of growing exponentially in size: let us But it also has the major drawback of growing exponentially in size: let us
consider a game in which Opponent must play $A$ and $B$ in no particular order consider a game in which Opponent must play $A$ and $B$ in no particular order
@ -343,10 +351,13 @@ before Player can play $C$. The corresponding tree-like game would be
\end{tikzpicture} \end{tikzpicture}
\] \]
This problem motivated the introduction of \emph{event structures} as a This goes even worse with less structure: since there are $n!$ % chktex 40
formalism to describe such games~\cite{rideau2011concurrent}. Informally, an permutations for $n$ elements, the tree can grow far bigger.
event structure is a partial order $\leq$ on \emph{events} (here, the game's
moves), alongside with a \emph{consistency} relation. This problem motivated the use of \emph{event structures} as a formalism to
describe such games~\cite{rideau2011concurrent}. Informally, an event structure
is a partial order $\leq$ on \emph{events} (here, the game's moves), alongside
with a \emph{consistency} relation.
The purpose of the consistency relation is to describe non-determinism, in The purpose of the consistency relation is to describe non-determinism, in
which we are not interested here, since we seek a deterministic model: in all which we are not interested here, since we seek a deterministic model: in all
@ -373,7 +384,9 @@ look like
\begin{definition}[event structure] \begin{definition}[event structure]
An \emph{event structure}~\cite{winskel1986event} is a poset $(E, \leq_E)$, An \emph{event structure}~\cite{winskel1986event} is a poset $(E, \leq_E)$,
where $E$ is a set of \emph{events} and $\leq_E$ is a partial order on $E$. where $E$ is a set of \emph{events} and $\leq_E$ is a partial order on $E$
such that for all $e \in E$, $\downclose{e} \eqdef \set{e' \in E~\vert~e'
\leq_E e}$ is finite.
The partial order $\leq_E$ naturally induces a binary relation $\edgeArrow$ The partial order $\leq_E$ naturally induces a binary relation $\edgeArrow$
over $E$ that is defined as the transitive reduction of $\leq_E$. over $E$ that is defined as the transitive reduction of $\leq_E$.
@ -425,10 +438,10 @@ A configuration can thus be seen as a valid state of the game. $\config(A)$
plays a major role in definitions and proofs on games and strategies. plays a major role in definitions and proofs on games and strategies.
\begin{notation} \begin{notation}
For $x,y \in \config(A)$, $x \forkover{e} y$ states that $y = x \sqcup For $x,y \in \config(A)$, $x \forkover{e} y$ states that $y =
\set{e}$ (and that both are valid configurations). It is also possible to x \sqcup \set{e}$ (and that both are valid configurations), where $\sqcup$
write $x \forkover{e}$, stating that $x \sqcup \set{e} \in \config(A)$, or denotes the disjoint union. It is also possible to write $x \forkover{e}$,
$x \fork y$. stating that $x \sqcup \set{e} \in \config(A)$, or $x \fork y$.
\end{notation} \end{notation}
%%%%% %%%%%
@ -444,20 +457,20 @@ For instance, one could imagine a game modeling the user interface of a coffee
machine: Player is the coffee machine, while Opponent is a user coming to buy a machine: Player is the coffee machine, while Opponent is a user coming to buy a
drink. drink.
\begin{example}[Coffee machine] \begin{example}[Process game]
Here, the game has only events, but no edges: nothing in the rules of the We can represent a process by the following game:
game constrains the program to behave in a certain way, only its \[
\emph{strategy} will do that. \begin{tikzpicture}
\node (1) at (0,0) [draw=red,ellipse] {call};
\smallskip \node (2) at (2,0) [draw=green,ellipse] {done};
\path[->]
\includedot[scale=0.9]{coffeemachine.game} (1) edge (2);
\captionof{figure}{Coffee machine game} \end{tikzpicture}
\]
\medskip The ``call'' event will be triggered by Opponent (the system) when the
process is started, and Player will play ``done'' when the process has
The user can insert a coin, ask for a coffee or ask for a tea. The coffee finished, if it ever does. The relation $\text{call} \leq \text{done}$
machine can deliver a coffee or deliver a tea. means that a process cannot finish \emph{before} it is called.
\end{example} \end{example}
\begin{definition}[pre-strategy] \begin{definition}[pre-strategy]
@ -465,39 +478,42 @@ drink.
\begin{enumerate}[(i)] \begin{enumerate}[(i)]
\item $\sigma \subseteq A$; \item $\sigma \subseteq A$;
\item $\config(\sigma) \subseteq \config(A)$; \item $\config(\sigma) \subseteq \config(A)$;
% \item \textit{(local injectivity)} $\forall s,s' \in \config(S),
% \sigma(s) = \sigma(s') \implies s = s'$;
\item $\forall s \in \sigma, \rho_A(s) = \rho_\sigma(s)$ \item $\forall s \in \sigma, \rho_A(s) = \rho_\sigma(s)$
\item \qnote{Local injectivity: useless without conflicts?}
\end{enumerate} \end{enumerate}
\end{definition} \end{definition}
\begin{example}[Coffee machine, cont.] \begin{example}[processes, cont.]
A possible \emph{pre-strategy} for our coffee machine example would be A possible \emph{pre-strategy} for the game consisting in two processes put
side by side (in which the game's events are annotated with a number to
distinguish the elements of the two processes) would be
\smallskip \[
\begin{tikzpicture}
\node (1) at (0,1.2) [draw=red,ellipse] {call$_0$};
\node (2) at (0,0) [draw=green,ellipse] {done$_0$};
\node (3) at (2,1.2) [draw=red,ellipse] {call$_1$};
\begin{centering} \path[->]
\includedot{coffeemachine.strat} (1) edge (2)
\captionof{figure}{Coffee machine strategy} (3) edge (2);
\end{centering} \end{tikzpicture}
\]
This pre-strategy makes sense: the coffee machine software waits for the This pre-strategy is valid: it is a subset of the game that does not
user to both put a coin and press ``coffee'' before delivering a coffee, include $\text{done}_1$, but it does include both $\text{call}_0$ and
and same goes for tea. Though, what happens if the user inserts a coin and $\text{done}_0$ and inherits the game's partial order.
presses \emph{both} buttons at the same time? Here, the coffee machine can
dispense both drinks. This behavior is surely unwanted, but cannot be This would describe two processes working in parallel. The process $0$
easily fixed using this representation of the coffee machine --- this would waits until the process $1$ is called before terminating, and the process $1$
have been the role of the \emph{consistency set} we briefly mentioned. never returns.
\end{example} \end{example}
But as it is defined, a pre-strategy does not exactly capture what we expect of But as it is defined, a pre-strategy does not exactly capture what we expect of
a \emph{strategy}: it is too expressive. For instance, a pre-strategy on the a \emph{strategy}: it is too expressive. For instance, the relation
coffee machine game would be allowed to have the (absurd) relation $\text{call}_0 \leq \text{call}_1$ on the above strategy is allowed, stating
$\text{coffee} \leq \text{getTea}$, stating that the user cannot press the that the operating system cannot decide to start the process $1$ before the
``tea'' button before the machine has delivered a coffee. This is unrealistic: process $0$. It is not up to the program to decide that, this strategy is thus
the coffee machine has no mean to enforce this. We then have to restrict unrealistic. We then have to restrict pre-strategies to \emph{strategies}:
pre-strategies to \emph{strategies}:
\begin{definition}[strategy] \begin{definition}[strategy]
A \emph{strategy} is a pre-strategy $\sigma : A$ that A \emph{strategy} is a pre-strategy $\sigma : A$ that
@ -505,8 +521,7 @@ pre-strategies to \emph{strategies}:
\begin{enumerate}[(i)] \begin{enumerate}[(i)]
\item\label{def:receptive} \item\label{def:receptive}
\textit{receptive}: for all $x \in \config(A)$ \st{} \textit{receptive}: for all $x \in \config(A)$ \st{}
$x \forkover{e^-}$, $e \in \sigma$; \qnote{equivalent to $x \forkover{e^-}$, $e \in \sigma$;
$\rho^{-1}(\ominus) \subseteq \sigma$ in a deterministic context?}
\item\label{def:courteous} \item\label{def:courteous}
\textit{courteous}: $\forall x \edgeArrow_\sigma x' \in \sigma$, \textit{courteous}: $\forall x \edgeArrow_\sigma x' \in \sigma$,
@ -515,8 +530,8 @@ pre-strategies to \emph{strategies}:
\end{enumerate} \end{enumerate}
\end{definition} \end{definition}
(\ref{def:receptive}) captures the idea that we should not force Opponent not (\ref{def:receptive}) captures the idea that we cannot prevent Opponent from
to play one of its moves. Indeed, not including an event in a strategy means playing one of its moves. Indeed, not including an event in a strategy means
that this event \emph{will not} be played. It is unreasonable to consider that that this event \emph{will not} be played. It is unreasonable to consider that
a strategy could forbid Opponent to play a given move. a strategy could forbid Opponent to play a given move.
@ -525,8 +540,8 @@ the games' rules, one can only make one of its moves depend on an Opponent
move, \ie{} every direct arrow in the partial order that is not inherited from move, \ie{} every direct arrow in the partial order that is not inherited from
the game should be ${\ominus \edgeArrow \oplus}$. Clearly, it is unreasonable the game should be ${\ominus \edgeArrow \oplus}$. Clearly, it is unreasonable
to consider an arrow ${\ostar \edgeArrow \ominus}$, which would mean forcing to consider an arrow ${\ostar \edgeArrow \ominus}$, which would mean forcing
Opponent to wait for a move (either from Player or Opponent) before player her Opponent to wait for a move (either from Player or Opponent) before playing
move; but ${\oplus \edgeArrow \oplus}$ is also unreasonable, since we're their move; but ${\oplus \edgeArrow \oplus}$ is also unreasonable, since we're
working in a concurrent context. Intuitively, one could think that when playing working in a concurrent context. Intuitively, one could think that when playing
$x$ then $y$, it is undefined whether Opponent will receive $x$ then $y$ or $y$ $x$ then $y$, it is undefined whether Opponent will receive $x$ then $y$ or $y$
then $x$. then $x$.
@ -534,6 +549,8 @@ then $x$.
%%%%% %%%%%
\subsubsection{Operations on games and strategies} \subsubsection{Operations on games and strategies}
\todo{Better progression in this part.}
In order to manipulate strategies and define them by induction over the syntax, In order to manipulate strategies and define them by induction over the syntax,
the following operations will be extensively used. It may also be worth noting the following operations will be extensively used. It may also be worth noting
that in the original formalism~\cite{castellan2016concurrent}, games, that in the original formalism~\cite{castellan2016concurrent}, games,
@ -554,33 +571,52 @@ $B$ and $C$ denotes games, $\sigma: A$ and $\tau: B$ denotes strategies.
polarities) and to strategies.
\end{definition}
In the example before, when talking of ``two processes side by side'', we
actually referred formally to the parallel composition of two processes.

\smallskip

Given two strategies on dual games $A$ and $A^\perp$, it is natural and
interesting to compute their \emph{interaction}, that is, ``what will happen if
one strategy plays against the other''.

\begin{definition}[Closed interaction]
Given two strategies $\sigma : A$ and $\tau : A^\perp$, their
\emph{interaction} $\sigma \wedge \tau$ is the ESP
$\sigma \cap \tau \subseteq A$ from which causal loops have been removed.

More precisely, $\sigma \cap \tau$ is a set adjoined with a \emph{preorder}
${(\leq_\sigma \cup \leq_\tau)}^\ast$ (transitive closure) that may not
respect antisymmetry, that is, may have causal loops. The event structure
$\sigma \wedge \tau$ is then obtained by removing all the elements
contained in such loops from $\sigma \cap \tau$.
\end{definition}
\textit{This construction is a simplified version of the analogous one
from~\cite{castellan2016concurrent} (the pullback), taking advantage of the
fact that our event structures are deterministic --- that is, without a
consistency set.}
This indeed captures what we wanted: $\sigma \wedge \tau$ contains the moves
that both $\sigma$ and $\tau$ are ready to play, including both orders, except
for the events that can never be played because of a ``deadlock'' (\ie{} a
causal loop).
\smallskip
We might now try to generalize that to an \emph{open} case, where both
strategies don't play on the same games, but only have a common part. Our
objective here is to \emph{compose} strategies: indeed, a strategy on $A^\perp
\parallel B$ can be seen as a strategy \emph{from $A$ to $B$}, playing as
Opponent on a board $A$ and as Player on a board $B$. This somehow looks like a
function, that could be composed with another strategy on $B^\perp \parallel
C$.
\begin{definition}[Compositional interaction]
Given two strategies $\sigma : A^\perp \parallel B$ and $\tau : B^\perp
\parallel C$, their \emph{compositional interaction} $\tau \strInteract
\sigma$ is an event structure defined as $(\sigma \parallel C) \wedge
(A^\perp \parallel \tau)$, where $A^\perp$ and $C$ are seen as strategies.
\end{definition}
The idea is to put in correspondence the ``middle'' states (those of $B$) while
@ -588,17 +624,180 @@ adding ``neutral'' states for $A$ and $C$.
$\tau \strInteract \sigma$ is an \emph{event structure} (\ie, without
polarities): indeed, the two strategies disagree on the polarities of the
middle part. Alternatively, it can be seen as an ESP with a polarity function
over $\set{+,-,?}$.
\note{Fin de la partie refaite.}
From this point, the notion of composition we sought is only a matter of
``hiding'' the middle part:
\begin{definition}[Strategies composition]
Given two strategies $\sigma : A^\perp \parallel B$ and $\tau : B^\perp
\parallel C$, their \emph{composition} $\tau \strComp \sigma$ is the ESP
$(\tau \strInteract \sigma) \cap (A^\perp \parallel C)$, on which the
partial order is the restriction of $\leq_{\tau \strInteract \sigma}$ and
the polarities those of $\sigma$ and $\tau$.
\end{definition}
It is then useful to consider an identity strategy \wrt{} the composition
operator. This identity is called the \emph{copycat} strategy:
\begin{definition}[Copycat]
The \emph{copycat strategy} of a game $A$, $\cc_A$, is the strategy on the
game $A^\perp \parallel A$ whose events are $A^\perp \parallel A$ wholly,
on which the order is the transitive closure of $\leq_{A^\perp \parallel A}
\cup \set{ (1-i, x) \leq (i, x) \vert x \in A~\&~\rho((i,x)) = \oplus}$.
\end{definition}
The copycat strategy of a game is indeed an identity for the composition of
\emph{strategies}. In fact, it even holds that for a \emph{pre-}strategy
$\sigma : A$, $\sigma$ is a strategy $\iff$ $\cc_A \strComp \sigma = \sigma$.
\begin{example}[copycat]
If we consider the following game $A$
\[
\begin{tikzpicture}
\node (1) [draw=red,ellipse] {A};
\node (3) [draw=green,ellipse,right of=1] {C};
\node (2) [draw=red,ellipse,right of=3] {B};
\path[->]
(3) edge (1);
\end{tikzpicture}
\]
its copycat strategy $\cc_A$ is
\[
\begin{tikzpicture}
\node (01) {($A^\perp$)};
\node (02) [below of=01] {($A$)};
\node (11) [draw=green,ellipse,right of=01] {A};
\node (31) [draw=red,ellipse,right of=11] {C};
\node (21) [draw=green,ellipse,right of=31] {B};
\node (12) [draw=red,ellipse,right of=02] {A};
\node (32) [draw=green,ellipse,right of=12] {C};
\node (22) [draw=red,ellipse,right of=32] {B};
\path[->]
(12) edge (11)
(22) edge (21)
(31) edge (32)
(31) edge (11)
(32) edge (12);
\end{tikzpicture}
\]
\end{example}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Interpretation of \llccs}
We can now equip \llccs{} with denotational semantics, interpreting the
language as strategies as defined in figure~\ref{fig:llccs:interp}.
\begin{figure}[h]
\begin{minipage}[t]{0.45\textwidth} \begin{align*}
\seman{x^A} &\eqdef \cc_{\seman{A}} \\
\seman{t^{A \linarrow B}~u^{A}} &\eqdef
\cc_{A \linarrow B} \strComp \left( \seman{t} \parallel \seman{u}
\right) \\
\seman{\lambda x^A \cdot t} &\eqdef \seman{t}
\end{align*} \end{minipage} \hfill
\begin{minipage}[t]{0.45\textwidth} \begin{align*}
\seman{\alpha} &\eqdef \ominus \\
\seman{A \linarrow B} &\eqdef \seman{A}^\perp \parallel \seman{B} \\
\end{align*}\end{minipage}
\begin{align*}
\seman{P \parallel Q} &\eqdef \left(
\begin{tikzpicture}[baseline, scale=0.8]
\node (4) at (0,0.65) [draw=green,ellipse] {call $P$};
\node (5) at (0,-0.65) [draw=red,ellipse] {done $P$};
\node (2) at (2.5,0.65) [draw=green,ellipse] {call $Q$};
\node (3) at (2.5,-0.65) [draw=red,ellipse] {done $Q$};
\node (0) at (5,0.65) [draw=red,ellipse] {call};
\node (1) at (5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (2)
edge [bend right] (4)
(2) edge (3)
(4) edge (5)
(3) edge [bend right] (1)
(5) edge [bend right] (1);
\end{tikzpicture}
\right) \strComp \left(\seman{P} \parallel \seman{Q}\right) &
\seman{\proc} = \seman{\chan} &\eqdef \begin{tikzpicture}[baseline]
\node (1) at (0,0.5) [draw=red,ellipse] {call};
\node (2) at (0,-0.5) [draw=green,ellipse] {done};
\draw [->] (1) -- (2);
\end{tikzpicture}
\\ %%%%%%%%%%%%%%%%%%%%%%%%%
\seman{P \cdot Q} &\eqdef \left(
\begin{tikzpicture}[baseline,scale=0.8]
\node (4) at (0,0.65) [draw=green,ellipse] {call $P$};
\node (5) at (0,-0.65) [draw=red,ellipse] {done $P$};
\node (2) at (2.5,0.65) [draw=green,ellipse] {call $Q$};
\node (3) at (2.5,-0.65) [draw=red,ellipse] {done $Q$};
\node (0) at (5,0.65) [draw=red,ellipse] {call};
\node (1) at (5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (4)
(2) edge (3)
(4) edge (5)
(3) edge [bend right] (1)
(5) edge (2);
\end{tikzpicture}
\right) \strComp \left(\seman{P} \parallel \seman{Q}\right) &
\seman{1} &\eqdef \begin{tikzpicture}[baseline]
\node (1) at (0,0.5) [draw=red,ellipse] {call};
\node (2) at (0,-0.5) [draw=green,ellipse] {done};
\draw [->] (1) -- (2);
\end{tikzpicture}
\\ %%%%%%%%%%%%%%%%%%%%%%%%%
\seman{(a : \chan) \cdot P} &\eqdef \left(
\begin{tikzpicture}[baseline,scale=0.8]
\node (4) at (0,0.65) [draw=green,ellipse] {call $P$};
\node (5) at (0,-0.65) [draw=red,ellipse] {done $P$};
\node (2) at (2.5,0.65) [draw=green,ellipse] {call $a$};
\node (3) at (2.5,-0.65) [draw=red,ellipse] {done $a$};
\node (0) at (5,0.65) [draw=red,ellipse] {call};
\node (1) at (5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (2)
(2) edge (3)
(4) edge (5)
(3) edge (4)
(5) edge [bend right] (1);
\end{tikzpicture}
\right) \strComp \left(\seman{P} \parallel \seman{a}\right) &
\seman{0} &\eqdef \begin{tikzpicture}[baseline]
\node (1) at (0,0.2) [draw=red,ellipse] {call};
\end{tikzpicture}
\\ %%%%%%%%%%%%%%%%%%%%%%%%%
\seman{(\nu a) P} &\eqdef \left(
\begin{tikzpicture}[baseline,scale=0.8]
\node (6) at (0,0.65) [draw=green,ellipse] {call $a$};
\node (7) at (0,-0.65) [draw=red,ellipse] {done $a$};
\node (4) at (2.5,0.65) [draw=green,ellipse] {call $\bar{a}$};
\node (5) at (2.5,-0.65) [draw=red,ellipse] {done $\bar{a}$};
\node (2) at (5,0.65) [draw=green,ellipse] {call $P$};
\node (3) at (5,-0.65) [draw=red,ellipse] {done $P$};
\node (0) at (7.5,0.65) [draw=red,ellipse] {call};
\node (1) at (7.5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (2)
(2) edge (3)
(3) edge [bend right] (1)
(4) edge (5)
edge (7)
(6) edge (7)
edge (5);
\end{tikzpicture}
\right) \strComp \seman{P} &
\end{align*}
\caption{\llccs{} interpretation as strategies}\label{fig:llccs:interp}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%
\subsection{Adequacy}
@ -710,7 +909,7 @@ each variable in the environment can and must be used exactly once.
\subsection{Definition}
The linear lambda calculus we use has the same syntax as the usual simply-typed
lambda calculus with type annotations:
\medskip
@ -735,23 +934,6 @@ This explains the definition of $\seman{\lambda x^A \cdot t}$: $\seman{t}$
plays on $\seman{A}^\perp \parallel \seman{B}$, same as $\seman{\lambda x^A
\cdot t}$.
\begin{figure}
\begin{minipage}[t]{0.45\textwidth} \begin{align*}
\seman{x^A} &\eqdef \cc_{\seman{A}} \\
\seman{t^{A \linarrow B}~u^{A}} &\eqdef
\cc_{A \linarrow B} \strComp \left( \seman{t} \parallel \seman{u}
\right) \\
\seman{\lambda x^A \cdot t} &\eqdef \seman{t} \\
\seman{t \tens u} &\eqdef \seman{t} \parallel \seman{u}
\end{align*} \end{minipage} \hfill
\begin{minipage}[t]{0.45\textwidth} \begin{align*}
\seman{\alpha} &\eqdef \ominus \\
\seman{A \linarrow B} &\eqdef \seman{A}^\perp \parallel \seman{B} \\
\seman{A \Tens B} &\eqdef \seman{A} \parallel \seman{B}
\end{align*}\end{minipage}
\caption{Interpretation of linear lambda calculus}\label{fig:llam:interp}
\end{figure}
\subsection{Implementation}
The implementation, which was supposed to be fairly simple, turned out to be
@ -770,101 +952,6 @@ the variables introduction order, thus intertwining $\Gamma$ and $\Delta$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Linear \lccs}
\begin{figure}[h]
\begin{align*}
\seman{P \parallel Q} &\eqdef \left(
\begin{tikzpicture}[baseline, scale=0.8]
\node (4) at (0,0.65) [draw=green,ellipse] {call $P$};
\node (5) at (0,-0.65) [draw=red,ellipse] {done $P$};
\node (2) at (2.5,0.65) [draw=green,ellipse] {call $Q$};
\node (3) at (2.5,-0.65) [draw=red,ellipse] {done $Q$};
\node (0) at (5,0.65) [draw=red,ellipse] {call};
\node (1) at (5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (2)
edge [bend right] (4)
(2) edge (3)
(4) edge (5)
(3) edge [bend right] (1)
(5) edge [bend right] (1);
\end{tikzpicture}
\right) \strComp \left(\seman{P} \parallel \seman{Q}\right) &
\seman{\proc} = \seman{\chan} &\eqdef \begin{tikzpicture}[baseline]
\node (1) at (0,0.5) [draw=red,ellipse] {call};
\node (2) at (0,-0.5) [draw=green,ellipse] {done};
\draw [->] (1) -- (2);
\end{tikzpicture}
\\ %%%%%%%%%%%%%%%%%%%%%%%%%
\seman{P \cdot Q} &\eqdef \left(
\begin{tikzpicture}[baseline,scale=0.8]
\node (4) at (0,0.65) [draw=green,ellipse] {call $P$};
\node (5) at (0,-0.65) [draw=red,ellipse] {done $P$};
\node (2) at (2.5,0.65) [draw=green,ellipse] {call $Q$};
\node (3) at (2.5,-0.65) [draw=red,ellipse] {done $Q$};
\node (0) at (5,0.65) [draw=red,ellipse] {call};
\node (1) at (5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (4)
(2) edge (3)
(4) edge (5)
(3) edge [bend right] (1)
(5) edge (2);
\end{tikzpicture}
\right) \strComp \left(\seman{P} \parallel \seman{Q}\right) &
\seman{1} &\eqdef \begin{tikzpicture}[baseline]
\node (1) at (0,0.5) [draw=red,ellipse] {call};
\node (2) at (0,-0.5) [draw=green,ellipse] {done};
\draw [->] (1) -- (2);
\end{tikzpicture}
\\ %%%%%%%%%%%%%%%%%%%%%%%%%
\seman{(a : \chan) \cdot P} &\eqdef \left(
\begin{tikzpicture}[baseline,scale=0.8]
\node (4) at (0,0.65) [draw=green,ellipse] {call $P$};
\node (5) at (0,-0.65) [draw=red,ellipse] {done $P$};
\node (2) at (2.5,0.65) [draw=green,ellipse] {call $a$};
\node (3) at (2.5,-0.65) [draw=red,ellipse] {done $a$};
\node (0) at (5,0.65) [draw=red,ellipse] {call};
\node (1) at (5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (2)
(2) edge (3)
(4) edge (5)
(3) edge (4)
(5) edge [bend right] (1);
\end{tikzpicture}
\right) \strComp \left(\seman{P} \parallel \seman{a}\right) &
\seman{0} &\eqdef \begin{tikzpicture}[baseline]
\node (1) at (0,0.2) [draw=red,ellipse] {call};
\end{tikzpicture}
\\ %%%%%%%%%%%%%%%%%%%%%%%%%
\seman{(\nu a) P} &\eqdef \left(
\begin{tikzpicture}[baseline,scale=0.8]
\node (6) at (0,0.65) [draw=green,ellipse] {call $a$};
\node (7) at (0,-0.65) [draw=red,ellipse] {done $a$};
\node (4) at (2.5,0.65) [draw=green,ellipse] {call $\bar{a}$};
\node (5) at (2.5,-0.65) [draw=red,ellipse] {done $\bar{a}$};
\node (2) at (5,0.65) [draw=green,ellipse] {call $P$};
\node (3) at (5,-0.65) [draw=red,ellipse] {done $P$};
\node (0) at (7.5,0.65) [draw=red,ellipse] {call};
\node (1) at (7.5,-0.65) [draw=green,ellipse] {done};
\path[->]
(0) edge (1)
edge [bend right] (2)
(2) edge (3)
(3) edge [bend right] (1)
(4) edge (5)
edge (7)
(6) edge (7)
edge (5);
\end{tikzpicture}
\right) \strComp \seman{P} &
\end{align*}
\caption{CCS interpretation as strategies}\label{fig:lccs:interp}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliography{biblio}