From 23a7d3a63eeb22582f38e2f17382ced803ce6275 Mon Sep 17 00:00:00 2001
From: Tim Daly
Date: Mon, 16 Apr 2018 02:48:54 -0400
Subject: [PATCH] books/bookvolbib add references
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Goal: Proving Axiom Correct
\index{Francez, Nissim}
\begin{chunk}{axiom.bib}
@book{Fran92,
author = "Francez, Nissim",
title = {{Program Verification}},
year = "1992",
publisher = "Addison-Wesley",
isbn = "0201416085"
}
\end{chunk}
\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@book{Appe92,
author    = {Appel, Andrew W.},
title     = {{Compiling with Continuations}},
publisher = {Cambridge University Press},
year      = {1992},
isbn      = {052103311X},
}
\end{chunk}
\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@book{Appe98,
author    = {Appel, Andrew W.},
title     = {{Modern Compiler Implementation in ML}},
publisher = {Cambridge University Press},
year      = {1998},
isbn      = {0521607647},
}
\end{chunk}
\index{Paulson, L.C.}
\begin{chunk}{axiom.bib}
@book{Paul96,
author = "Paulson, L.C.",
title = {{ML for the Working Programmer}},
edition = "2nd",
year = "1996",
publisher = "Cambridge University Press",
isbn = "052156543X"
}
\end{chunk}
\index{Pollack, Robert}
\begin{chunk}{axiom.bib}
@phdthesis{Poll94,
author = "Pollack, Robert",
title = {{The Theory of LEGO: A Proof Checker for the Extended Calculus
of Constructions}},
school = "University of Edinburgh",
link =
"\url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.38.2610}",
year = "1994",
abstract =
"LEGO is a computer program for interactive typechecking in the
Extended Calculus of Constructions and two of its subsystems. LEGO
also supports the extension of these three systems with inductive
types. These type systems can be viewed as logics, and as meta
languages for expressing logics, and LEGO is intended to be used for
interactively constructing proofs in mathematical theories presented
in these logics. I have developed LEGO over six years, starting from
an implementation of the Calculus of Constructions by Gerard
Huet. LEGO has been used for problems at the limits of our abilities
to do formal mathematics.
In this thesis I explain some aspects of the metatheory of LEGO's
type systems leading to a machine-checked proof that typechecking is
decidable for all three type theories supported by LEGO, and to a
verified algorithm for deciding their typing judgements, assuming only
that they are normalizing. In order to do this, the theory of Pure
Type Systems (PTS) is extended and formalized in LEGO. This extended
example of a formally developed body of mathematics is described, both
for its main theorems, and as a case study in formal mathematics. In
many examples, I compare formal definitions and theorems with their
informal counterparts, and with various alternative approaches, to
study the meaning and use of mathematical language, and suggest
clarifications in the informal usage.
Having outlined a formal development far too large to be surveyed in
detail by a human reader, I close with some thoughts on how the human
mathematician's state of understanding and belief might be affected by
possessing such a thing.",
paper = "Poll94.pdf"
}
\end{chunk}
\index{Pollack, Robert}
\begin{chunk}{axiom.bib}
@article{Poll94a,
author = "Pollack, Robert",
title = {{On Extensibility of Proof Checkers}},
journal = "LNCS",
volume = "996",
pages = "140--161",
year = "1994",
abstract =
"My suggestion is little different from LCF, just replacing one
computational meta language (ML) with another (ECC, FS0,...). The
philosophical point is that it is then possible to accept
non-canonical proof notations as object level proofs, removing the
need to actually normalize them. There are problems to be worked out
in practice, such as extraction of programs from constructive proof,
and efficient execution of pure, total programs. Although this
approach doesn't address the difficulty of proving correctness of
tactics in the meta level, it is immediately useful for tactics with
structural justification (e.g. weakening) which are not even
representable in LCF, and are infeasible in the Nuprl variant of
LCF. Since it can be used for any object system without adding new
principles such as reflection, and is compatible with other approaches
to extensibility (especially partial reflection), it should be
considered as part of the answer to extensibility in proof checkers.",
paper = "Poll94a.pdf"
}
\end{chunk}
\index{Pfenning, Frank}
\index{Elliott, Conal}
\begin{chunk}{axiom.bib}
@inproceedings{Pfen88a,
author = "Pfenning, Frank and Elliott, Conal",
title = {{Higher-Order Abstract Syntax}},
booktitle = "Symp. on Language Design and Implementation PLDI'88",
publisher = "ACM",
link = "\url{https://www.cs.cmu.edu/~fp/papers/pldi88.pdf}",
year = "1988",
abstract =
"We describe motivation, design, use, and implementation of
higher-order abstract syntax as a central representation for
programs, formulas, rules, and other syntactic objects in program
manipulation and other formal systems where matching and substitution
or unification are central operations. Higher-order abstract syntax
incorporates name binding information in a uniform and
language-generic way. Thus it acts as a powerful link integrating
diverse tools in such formal environments. We have implemented
higher-order abstract syntax, a supporting matching and unification
algorithm, and some clients in Common Lisp in the framework of the
Ergo project at Carnegie Mellon University.",
paper = "Pfen88a.pdf",
keywords = "printed"
}
\end{chunk}
\index{McDowell, Raymond}
\index{Miller, Dale}
\begin{chunk}{axiom.bib}
@article{Mcdo02,
author = "McDowell, Raymond and Miller, Dale",
title = {{Reasoning with Higher-Order Abstract Syntax in a Logical
Framework}},
journal = "ACM Trans. Computational Logic",
volume = "3",
year = "2002",
pages = "80--136",
link = "\url{http://www.lix.polytechnique.fr/~dale/papers/mcdowell01.pdf}",
abstract =
"Logical frameworks based on intuitionistic or linear logics with
higher-type quantification have been successfully used to give
high-level, modular, and formal specifications of many important
judgments in the area of programming languages and inference systems.
Given such specifications, it is natural to consider proving
properties about the specified systems in the framework: for example,
given the specification of evaluation for a functional programming
language, prove that the language is deterministic or that evaluation
preserves types. One challenge in developing a framework for such
reasoning is that higher-order abstract syntax (HOAS), an elegant and
declarative treatment of object-level abstraction and substitution, is
difficult to treat in proofs involving induction. In this paper, we
present a meta-logic that can be used to reason about judgments coded
using HOAS; this meta-logic is an extension of a simple intuitionistic
logic that admits higher-order quantification over simply typed
$\lambda$-terms (key ingredients for HOAS) as well as induction and a
notion of definition. The latter concept of definition is a
proof-theoretic device that allows certain theories to be treated as
'closed' or as defining fixed points. We explore the difficulties of
formal meta-theoretic analysis of HOAS encodings by considering
encodings of intuitionistic and linear logics, and formally derive the
admissibility of cut for important subsets of these logics. We then
propose an approach to avoid the apparent tradeoff between the
benefits of higher-order abstract syntax and the ability to analyze
the resulting encodings. We illustrate this approach through examples
involving the simple functional and imperative programming languages
$PCF$ and $PCF_{:=}$. We formally derive such properties as unicity of
typing, subject reduction, determinacy of evaluation, and the
equivalence of transition semantics and natural semantics
presentations of evaluation.",
paper = "Mcdo02.pdf"
}
\end{chunk}
\index{Barras, Bruno}
\begin{chunk}{axiom.bib}
@techreport{Barr96a,
author = "Barras, Bruno",
title = {{Coq en Coq}},
institution = "INRIA",
year = "1996",
type = "Research Report",
number = "inria-00073667",
comment = "French",
abstract =
"The essential step of the formal verification of a proof-checker such
as Coq is the verification of its kernel: a typechecker for the
{\sl Calculus of Inductive Constructions} (CIC) which is its underlying
formalism. The present work is a first small-scale attempt on a
significative fragment of CIC: the Calculus of Constructions (CC). We
formalize the definition and the metatheory of (CC) in Coq. In
particular, we prove strong normalization and decidability of type
inference. From the latter proof, we extract a certified {\sl Caml
Light} program, which performs type inference (or typechecking) for
an arbitrary typing judgement in CC. Integrating this program in a
larger system, including a parser and pretty-printer, we obtain a
stand-alone proof-checker, called {\sl CoC}, the {\sl Calculus of
Constructions}. As an example, the formal proof of Newman's lemma,
built with Coq, can be reverified by {\sl CoC} with reasonable
performance.",
paper = "Barr96a.pdf"
}
\end{chunk}
\index{Barras, Bruno}
\index{Werner, Benjamin}
\begin{chunk}{axiom.bib}
@misc{Barr18,
author = "Barras, Bruno and Werner, Benjamin",
title = {{Coq in Coq}},
link =
"\url{http://www.lix.polytechnique.fr/Labo/Bruno.Barras/publi/coqincoq.pdf}",
comment = "https://github.com/coq-contribs/coq-in-coq",
abstract =
"We formalize the definition and the metatheory of the Calculus of
Constructions (CC) using the proof assistant Coq. In particular,
we prove strong normalization and decidability of type
inference. From the latter proof, we extract a certified Objective
Caml program which performs type inference in CC and use this code
to build a small-scale certified proof-checker.",
paper = "Barr18.pdf",
keywords = "printed"
}
\end{chunk}
\index{Huet, G\'erard P.}
\begin{chunk}{axiom.bib}
@article{Huet75,
author = "Huet, G{\'e}rard P.",
title = {{A Unification Algorithm for typed $\lambda$-Calculus}},
journal = "Theoretical Computer Science",
volume = "1",
number = "1",
pages = "25--57",
year = "1975",
abstract =
"A semidecision algorithm is presented, to search for unification of
formulas in typed $\omega$-order $\lambda$-calculus, and its
correctness is proved.
It is shown that the search space is significantly smaller than the
one for finding the most general unifiers. In particular, our search
is not redundant. This allows our algorithm to have good
directionality and convergence properties.",
paper = "Huet75.pdf"
}
\end{chunk}
\index{Huet, G\'erard P.}
\index{Lang, Bernard}
\begin{chunk}{axiom.bib}
@article{Huet78,
author = "Huet, G{\'e}rard P. and Lang, Bernard",
title = {{Proving and Applying Program Transformations Expressed
with Second-Order Patterns}},
journal = "Acta Informatica",
volume = "11",
number = "1",
pages = "31--55",
year = "1978",
abstract =
"We propose a program transformation method based on rewriting rules
composed of second-order schemas. A complete second-order matching
algorithm is presented that allows effective use of these rules. We
show how to formally prove the correctness of the rules using a
denotational semantics for the programming language. We establish the
correctness of the transformation method itself, and give techniques
pertaining to its actual implementation. The paper is illustrated with
recursion removal examples."
}
\end{chunk}
\index{Hamming, Richard}
\begin{chunk}{axiom.bib}
@misc{Hamm95,
author = "Hamming, Richard",
title = {{You and Your Research}},
link = "\url{https://www.youtube.com/watch?v=a1zDuOPkMSw}",
year = "1995"
}
\end{chunk}
\index{Dolan, Stephen}
\begin{chunk}{axiom.bib}
@phdthesis{Dola16,
author = "Dolan, Stephen",
title = {{Algebraic Subtyping}},
school = "University of Cambridge",
year = "2016",
link = "\url{https://www.cl.cam.ac.uk/~sd601/thesis.pdf}",
abstract =
"Type inference gives programmers the benefit of static,
compile-time type checking without the cost of manually specifying
types, and has long been a standard feature of functional programming
languages. However, it has proven difficult to integrate type
inference with subtyping, since the unification engine at the core of
classical type inference accepts only equations, not subtyping
constraints.
This thesis presents a type system combining ML-style parametric
polymorphism and subtyping, with type inference, principal types,
and decidable type subsumption. Type inference is based on {\sl
biunification}, an analogue of unification that works with
subtyping constraints.
Making this possible are several contributions, beginning with the
notion of an 'extensible' type system, in which an open world of
types is assumed, so that no typeable program becomes untypeable
by the addition of new types to the language. While previous
formulations of subtyping fail to be extensible, this thesis
shows that adopting a more algebraic approach can remedy this.
Using such an approach, this thesis develops the theory of
biunification, shows how it is used to infer types, and shows how
it can be efficiently implemented, exploiting deep connections
between the algebra of regular languages and polymorphic subtyping.",
paper = "Dola16.pdf"
}
\end{chunk}
\index{Downen, Paul}
\index{Maurer, Luke}
\index{Ariola, Zena M.}
\begin{chunk}{axiom.bib}
@inproceedings{Down16,
author = "Downen, Paul and Maurer, Luke and Ariola, Zena M.",
title = {{Sequent Calculus as a Compiler Intermediate Language}},
booktitle = "ICFP'16",
year = "2016",
pages = "74--88",
publisher = "ACM",
isbn = "9781450342193",
abstract =
"The $\lambda$-calculus is popular as an intermediate language for
practical compilers. But in the world of logic it has a
lesser-known twin, born at the same time, called the {\sl sequent
calculus}. Perhaps that would make for a good intermediate
language, too? To explore this question we designed Sequent Core,
a practically-oriented core calculus based on the sequent
calculus, and used it to reimplement a substantial chunk of the
Glasgow Haskell Compiler.",
paper = "Down16.pdf"
}
\end{chunk}
\index{Zeilberger, Noam}
\begin{chunk}{axiom.bib}
@phdthesis{Zeil09,
author = "Zeilberger, Noam",
title = {{The Logical Basis of Evaluation Order and Pattern-Matching}},
school = "Carnegie Mellon University",
year = "2009",
link = "\url{https://www.cs.cmu.edu/~rwh/theses/zeilberger.pdf}",
abstract =
"An old and celebrated analogy says that writing programs is like
proving theorems. This analogy has been productive in both
directions, but in particular has demonstrated remarkable utility in
driving progress in programming languages, for example leading towards
a better understanding of concepts such as abstract data types and
polymorphism. One of the best known instances of the analogy actually
rises to the level of an isomorphism: between Gentzen's natural
deduction and Church's lambda calculus. However, as has been
recognized for a while, lambda calculus fails to capture some of the
important features of modern programming languages. Notably, it does
not have an inherent notion of evaluation order, needed to make sense
of programs with side effects. Instead, the historical descendants of
lambda calculus (languages like Lisp, ML, Haskell, etc.) impose
evaluation order in an ad hoc way.
This thesis aims to give a fresh take on the proofs-as-programs
analogy---one which better accounts for features of modern programming
languages---by starting from a different logical foundation. Inspired
by Andreoli's focusing proofs for linear logic, we explain how to
axiomatize certain canonical forms of logical reasoning through a
notion of pattern. Propositions come with an intrinsic polarity,
based on whether they are defined by patterns of proof, or by patterns
of refutation. Applying the analogy, we then obtain a programming
language with built-in support for pattern-matching, in which
evaluation order is explicitly reflected at the level of types---and
hence can be controlled locally, rather than being an ad hoc, global
policy decision. As we show, different forms of continuation-passing
style (one of the historical tools for analyzing evaluation order)
can be described in terms of different polarizations. This language
provides an elegant, uniform account of both untyped and
intrinsically-typed computation (incorporating ideas from infinitary
proof theory), and additionally, can be provided an extrinsic type
system to express and statically enforce more refined properties of
programs. We conclude by using this framework to explore the theory of
typing and subtyping for intersection and union types in the presence
of effects, giving a simplified explanation of some of the unusual
artifacts of existing systems.",
paper = "Zeil09.pdf"
}
\end{chunk}
\index{Girard, JeanYves}
\begin{chunk}{axiom.bib}
@misc{Gira95,
author = "Girard, Jean-Yves",
title = {{Linear Logic: Its Syntax and Semantics}},
year = "1995",
paper = "Gira95.pdf",
keywords = "printed"
}
\end{chunk}
\index{Dolan, Stephen}
\index{Mycroft, Alan}
\begin{chunk}{axiom.bib}
@article{Dola17,
author = "Dolan, Stephen and Mycroft, Alan",
title = {{Polymorphism, Subtyping, and Type Inference in MLsub}},
journal = "ACM SIGPLAN Notices",
volume = "52",
number = "1",
year = "2017",
pages = "60--72",
link = "\url{https://www.cl.cam.ac.uk/~sd601/papers/mlsubpreprint.pdf}",
abstract =
"We present a type system combining subtyping and ML-style parametric
polymorphism. Unlike previous work, our system supports type inference
and has compact principal types. We demonstrate this system in the
minimal language MLsub, which types a strict superset of core ML
programs.
This is made possible by keeping a strict separation between the types
used to describe inputs and those used to describe outputs, and
extending the classical unification algorithm to handle subtyping
constraints between these input and output types. Principal types are
kept compact by type simplification, which exploits deep connections
between subtyping and the algebra of regular languages. An
implementation is available online.",
paper = "Dola17.pdf"
}
\end{chunk}
\index{BonnaireSergeant, Ambrose}
\begin{chunk}{axiom.bib}
@misc{Bonn18,
author = "Bonnaire-Sergeant, Ambrose",
title = {{Are unsound type systems wrong?}},
year = "2018",
link =
"\url{http://frenchy64.github.io/2018/04/07/unsoundness-in-untyped-types.html}"
}
\end{chunk}
\index{Tuhola, Henri}
\begin{chunk}{axiom.bib}
@misc{Tuho18,
author  = {Tuhola, Henri},
title   = {{An another MLsub study}},
link    = {\url{http://boxbase.org/entries/2018/mar/12/mlsubstudy/}},
year    = {2018},
comment = {Subtyping does not solve the problem of coercion},
}
\end{chunk}
\index{Tuhola, Henri}
\begin{chunk}{axiom.bib}
@misc{Tuho18a,
author = "Tuhola, Henri",
title = {{Boxbase -- Index}},
link = "\url{http://boxbase.org/}",
year = "2018",
keywords = "axiomref"
}
\end{chunk}
\index{Tarditi, David}
\index{Acharya, Anurag}
\index{Lee, Peter}
\begin{chunk}{axiom.bib}
@techreport{Tard90,
author = "Tarditi, David and Acharya, Anurag and Lee, Peter",
title = {{No Assembly Required: Compiling Standard ML to C}},
type = "technical report",
institution = "Carnegie Mellon University",
number = "CMU-CS-90-187",
year = "1990",
abstract =
"C has been proposed as a portable target language for
implementing higher-order languages. Previous efforts at compiling
such languages to C have produced efficient code, but have had to
compromise on either the portability or the preservation of the
tail-recursive properties of the languages. We assert that neither
of these compromises is necessary for the generation of efficient
code. We offer a Standard ML to C compiler, which does not make
either of these compromises, as an existence proof. The generated
code achieves an execution speed that is just a factor of two
slower than the best native code compiler. In this paper, we
describe the design, implementation and the performance of this
compiler.",
paper = "Tard90.pdf",
keywords = "printed"
}
\end{chunk}
\index{Appel, Andrew W.}
\index{Jim, Trevor}
\begin{chunk}{axiom.bib}
@techreport{Appe88,
author = "Appel, Andrew W. and Jim, Trevor",
title = {{Continuation-Passing, Closure-Passing Style}},
institution = "Princeton University",
year = "1988",
number = "CS-TR-183-88",
abstract =
"We implemented a continuation-passing style (CPS) code generator for
ML. Our CPS language is represented as an ML datatype in which all
functions are named and most kinds of ill-formed expressions are
impossible. We separate the code generation into phases that rewrite
this representation into ever-simpler forms. Closures are represented
explicitly as records, so that closure strategies can be communicated
from one phase to another. No stack is used. Our benchmark data shows
that the new method is an improvement over our previous,
abstract-machine based code generator.",
paper = "Appe88.pdf",
keywords = "printed"
}
\end{chunk}
\index{Appel, Andrew W.}
\index{MacQueen, David B.}
\begin{chunk}{axiom.bib}
@article{Appe87,
author = "Appel, Andrew W. and MacQueen, David B.",
title = {{A Standard ML Compiler}},
journal = "LNCS",
volume = "274",
pages = "301--324",
year = "1987",
abstract =
"Standard ML is a major revision of earlier dialects of the functional
language ML. We describe the first compiler written for Standard ML in
Standard ML. The compiler incorporates a number of novel features and
techniques, and is probably the largest system written to date in
Standard ML.
Great attention was paid to modularity in the construction of the
compiler, leading to a successful large-scale test of the modular
capabilities of Standard ML. The front end is useful for purposes
other than compilation, and the back end is easily retargetable (we
have code generators for the VAX and MC68020). The module facilities
of Standard ML were taken into account early in the design of the
compiler, and they particularly influenced the environment management
component of the front end. For example, the symbol table structure is
designed for fast access to opened structures.
The front end of the compiler is a single phase that integrates
parsing, environment management, and type checking. The middle end
uses a sophisticated decision tree scheme to produce efficient
pattern matching code for functions and case expressions. The abstract
syntax produced by the front end is translated into a simple
lambda-calculus-based intermediate representation that lends itself to
easy case analysis and optimization in the code generator. Special
care was taken in designing the run-time data structures for fast
allocation and garbage collection.
We describe the overall organization of the compiler and present some
of the data representations and algorithms used in its various
phases. We conclude with some lessons learned about the ML language
itself and about compilers for modern functional languages.",
paper = "Appe87.pdf",
keywords = "printed"
}
\end{chunk}
\index{Steele, Guy Lewis}
\begin{chunk}{axiom.bib}
@techreport{Stee78,
author = "Steele, Guy Lewis",
title = {{RABBIT: A Compiler for SCHEME}},
institution = "Massachusetts Institute of Technology",
type = "technical report",
number = "AI-TR-474",
year = "1978",
abstract =
"We have developed a compiler for the lexically-scoped dialect of LISP
known as SCHEME. The compiler knows relatively little about specific
data manipulation primitives such as arithmetic operators, but
concentrates on general issues of environment and control. Rather than
having specialized knowledge about a large variety of control and
environment constructs, the compiler handles only a small basis set
which reflects the semantics of lambda-calculus. All of the
traditional imperative constructs, such as sequencing, assignment,
looping, GOTO, as well as many standard LISP constructs such as AND,
OR, and COND, are expressed in macros in terms of the applicative
basis set. A small number of optimization techniques, coupled with the
treatment of function calls as GOTO statements, serve to produce code
as good as that produced by more traditional compilers. The macro
approach enables speedy implementation of new constructs as desired
without sacrificing efficiency in the generated code.
A fair amount of analysis is devoted to determining whether
environments may be stack-allocated or must be heap-allocated.
Heap-allocated environments are necessary in general because SCHEME
(unlike Algol 60 and Algol 68, for example) allows procedures with
free lexically scoped variables to be returned as the values of other
procedures; the Algol stack-allocation environment strategy does not
suffice. The methods used here indicate that a heap-allocating
generalization of the 'display' technique leads to an efficient
implementation of such 'upward funargs'. Moreover, compile-time
optimization and analysis can eliminate many 'funargs' entirely, and
so far fewer environment structures need be allocated at run time than
might be expected.
A subset of SCHEME (rather than triples, for example) serves as the
representation intermediate between the optimized SCHEME code and the
final output code; code is expressed in this subset in the so-called
continuation-passing style. As a subset of SCHEME, it enjoys the same
theoretical properties; one could even apply the same optimizer used
on the input code to the intermediate code. However, the subset is so
chosen that all temporary quantities are made manifest as variables,
and no control stack is needed to evaluate it. As a result, this
apparently applicative representation admits an imperative
interpretation which permits easy transcription to final imperative
machine code. These qualities suggest that an applicative language
like SCHEME is a better candidate for an UNCOL than the more
imperative candidates proposed to date.",
paper = "Stee78.pdf"
}
\end{chunk}
\index{PeytonJones, Simon}
\begin{chunk}{axiom.bib}
@misc{Peyt17,
author = {PeytonJones, Simon},
title  = {{Escape from the ivory tower: the Haskell journey}},
year   = {2017},
link   = {\url{https://www.youtube.com/watch?v=re96UgMk6GQ}},
}
\end{chunk}
\index{Wadler, Philip}
\begin{chunk}{axiom.bib}
@article{Wadl92,
author = "Wadler, Philip",
title = {{Comprehending Monads}},
journal = "Mathematical Structures in Computer Science",
volume = "2",
pages = "461--493",
year = "1992",
abstract =
"Category theorists invented {\sl monads} in the 1960s to
concisely express certain aspects of universal algebra. Functional
programmers invented {\sl list comprehensions} in the 1970s to
concisely express certain programs involving lists. This paper
shows how list comprehensions may be generalised to an arbitrary
monad, and how the resulting programming feature can concisely
express in a pure functional language some programs that
manipulate state, handle exceptions, parse text, or invoke
continuations. A new solution to the old problem of destructive
array update is also presented. No knowledge of category theory is
assumed.",
paper = "Wadl92.pdf"
}
\end{chunk}
\index{PeytonJones, Simon}
\index{Wadler, Philip}
\begin{chunk}{axiom.bib}
@inproceedings{Peyt93,
author = "PeytonJones, Simon and Wadler, Philip",
title = {{Imperative Functional Programming}},
booktitle = "Principles of Programming Languages POPL'93",
publisher = "ACM",
year = "1993",
pages = "71--84",
abstract =
"We present a new model, based on monads, for performing
input/output in a non-strict, purely functional language. It is
composable, extensible, efficient, requires no extensions to the
type system, and extends smoothly to incorporate mixed-language
working and in-place array updates.",
paper = "Peyt93.pdf"
}
\end{chunk}
\index{Hughes, John}
\begin{chunk}{axiom.bib}
@inproceedings{Hugh90,
author = "Hughes, John",
title = {{Why Functional Programming Matters}},
booktitle = "Research Topics in Functional Programming",
publisher = "Addison-Wesley",
year = "1990",
pages = "17--42",
abstract =
"As software becomes more and more complex, it is more and more
important to structure it well. Well-structured software is easy to
write and to debug, and provides a collection of modules that can be
reused to reduce future programming costs. In this paper we show that
two features of functional languages in particular, higher-order
functions and lazy evaluation, can contribute significantly to
modularity. As examples, we manipulate lists and trees, program
several numerical algorithms, and implement the alpha-beta heuristic
(an algorithm from Artificial Intelligence used in game-playing
programs). We conclude that since modularity is the key to successful
programming, functional programming offers important advantages for
software development.",
paper = "Hugh90.pdf",
keywords = "printed"
}
\end{chunk}
\index{R\'emy, Didier}
\begin{chunk}{axiom.bib}
@misc{Remy17,
author = "R{\'e}my, Didier",
title = {{Type Systems for Programming Languages}},
year = "2017",
link =
"\url{http://gallium.inria.fr/~remy/mpri/cours1.pdf}
\url{http://gallium.inria.fr/~remy/mpri/cours2.pdf}
\url{http://gallium.inria.fr/~remy/mpri/cours3.pdf}
\url{http://gallium.inria.fr/~remy/mpri/cours4.pdf}
\url{http://gallium.inria.fr/~remy/mpri/cours5.pdf}",
paper = "Remy17.tgz"
}
\end{chunk}
\index{Wadler, Philip}
\begin{chunk}{axiom.bib}
@article{Wadl07,
author = "Wadler, Philip",
title = {{The Girard-Reynolds isomorphism (second edition)}},
journal = "Theoretical Computer Science",
volume = "375",
pages = "201--226",
year = "2007",
abstract =
"Jean-Yves Girard and John Reynolds independently discovered the
second-order polymorphic lambda calculus, F2. Girard additionally
proved a Representation Theorem : every function on natural numbers
that can be proved total in second-order intuitionistic predicate
logic, P2, can be represented in F2. Reynolds additionally proved an
Abstraction Theorem : every term in F2 satisfies a suitable notion of
logical relation; and formulated a notion of parametricity satisfied
by well-behaved models.
We observe that the essence of Girard's result is a projection from P2
into F2, and that the essence of Reynolds's result is an embedding of
F2 into P2, and that the Reynolds embedding followed by the Girard
projection is the identity. We show that the inductive naturals are
exactly those values of type natural that satisfy Reynolds's notion of
parametricity, and as a consequence characterize situations in which
the Girard projection followed by the Reynolds embedding is also the
identity.
An earlier version of this paper used a logic over untyped terms. This
version uses a logic over typed term, similar to ones considered by
Abadi and Plotkin and by Takeuti, which better clarifies the
relationship between F2 and P2.",
paper = "Wadl07.pdf"
}
\end{chunk}
\index{Morrisett, Greg}
\index{Walker, David}
\index{Crary, Karl}
\index{Glew, Neil}
\begin{chunk}{axiom.bib}
@article{Morr99,
author = "Morrisett, Greg and Walker, David and Crary, Karl and Glew, Neil",
title = {{From System F to Typed Assembly Language}},
journal = "ACM Trans. on Programming Languages and Systems TOPLAS",
volume = "21",
number = "3",
year = "1999",
pages = "527--568",
abstract =
"We motivate the design of typed assembly language (TAL) and present a
type-preserving translation from System F to TAL. The typed assembly
language we present is based on a conventional RISC assembly
language, but its static type system provides support for enforcing
high-level language abstractions, such as closures, tuples, and
user-defined abstract data types. The type system ensures that
well-typed programs cannot violate these abstractions. In addition,
the typing constructs admit many low-level compiler optimizations. Our
translation to TAL is specified as a sequence of type-preserving
transformations, including CPS and closure conversion phases;
type-correct source programs are mapped to type-correct assembly
language. A key contribution is an approach to polymorphic closure
conversion that is considerably simpler than previous work. The
compiler and typed assembly language provide a fully automatic way to
produce certified code, suitable for use in systems where untrusted
and potentially malicious code must be checked for safety before
execution.",
paper = "Morr99.pdf",
keywords = "printed"
}
\end{chunk}
\index{Kranz, David}
\index{Kelsey, Richard}
\index{Rees, Jonathan}
\index{Hudak, Paul}
\index{Philbin, James}
\index{Adams, Norman}
\begin{chunk}{axiom.bib}
@inproceedings{Kran86,
author = "Kranz, David and Kelsey, Richard and Rees, Jonathan and
Hudak, Paul and Philbin, James and Adams, Norman",
title = {{ORBIT: An Optimizing Compiler for Scheme}},
booktitle = "SIGPLAN '86",
publisher = "ACM",
pages = "219--233",
year = "1986",
abstract =
"In this paper we describe an optimizing compiler for Scheme
called {\sl Orbit} that incorporates our experience with an
earlier Scheme compiler called TC, together with some ideas from
Steele's Rabbit compiler. The three main design goals have been
correctness, generating very efficient compiled code, and
portability.",
paper = "Kran86.pdf",
keywords = "printed"
}
\end{chunk}
\index{Flanagan, Cormac}
\index{Sabry, Amr}
\index{Duba, Bruce F.}
\index{Felleisen, Matthias}
\begin{chunk}{axiom.bib}
@misc{Flan03,
author = "Flanagan, Cormac and Sabry, Amr and Duba, Bruce F. and
Felleisen, Matthias",
title = {{The Essence of Compiling with Continuations}},
link =
"\url{https://www.cs.rice.edu/~javaplt/411/17spring/Readings/essenceretro.pdf}",
paper = "Flan03.pdf",
keywords = "printed"
}
\end{chunk}
\index{Harper, Robert}
\index{Lillibridge, Mark}
\begin{chunk}{axiom.bib}
@inproceedings{Harp93b,
author = "Harper, Robert and Lillibridge, Mark",
title = {{Explicit Polymorphism and CPS Conversion}},
booktitle = "Symp. of Principles of Programming Languages",
publisher = "ACM Press",
year = "1993",
pages = "206--219",
abstract =
"We study the typing properties of CPS conversion for an extension
of F$\omega$ with control operators. Two classes of evaluation
strategies are considered, each with callbyname and callbyvalue
variants. Under the 'standard' strategies, constructor abstractions
are values, and constructor applications can lead to nontrivial
control effects. In contrast, the 'MLlike' strategies evaluate
beneath constructor abstractions, reflecting the usual interpretation
of programs in languages based on implicit polymorphism. Three
continuation passing style sublanguages are considered, one on which
the standard strategies coincide, one on which the MLlike
strategies coincide, and one on which all the strategies coincide.
Compositional, typepreserving CPS transformation algorithms are
given for the standard strategies, resulting in terms on which all
evaluation strategies coincide. This has as a corollary the
soundness and termination of welltyped programs under the standard
evaluation strategies. A similar result is obtained for the MLlike
callbyname strategy . In contrast, such results are obtained for
the callbyvalue MLlike strategy only for a restricted
sublanguage in which constructor abstractions are limited to
values.",
paper = "Harp93b.pdf",
keywords = "printed"
}
\end{chunk}
\index{Sabry, Amr}
\index{Felleisen, Matthias}
\begin{chunk}{axiom.bib}
@techreport{Sabr92,
author = "Sabry, Amr and Felleisen, Matthias",
title = {{Reasoning About Programs in ContinuationPassing Style}},
type = "technical report",
number = "TR 92-180",
institution = "Rice University",
year = "1992",
abstract =
"Plotkin's $\lambda$value calculus is sound but incomplete for
reasoning about $\beta\nu$transformations on programs in continuation
passing style (CPS). To find a complete extension, we define a new,
compactifying CPS transformation and an 'inverse' mapping,
$un$CPS, both of which are interesting in their own right. Using the
new CPS transformation, we can determine the precise language of CPS
terms closed under $\beta\nu$ transformations. Using the $un$CPS
transformation, we can derive a set of axioms such that every equation
between source programs is provable if and only if $\beta\nu$ can
prove the corresponding equation between CPS programs. The extended
calculus is equivalent to an untyped variant of Moggi's computational
$\lambda$calculus.",
paper = "Sabr92.pdf",
keywords = "printed"
}
\end{chunk}
\index{Phelps, Tom}
\begin{chunk}{axiom.bib}
@misc{Phel92,
author = "Phelps, Tom",
title = {{A Common Lisp CGOL}},
year = "1992",
link = "\url{https://people.eecs.berkeley.edu/~fateman/cgol/cgol.1/cgol.ps}",
abstract =
"CGOL is an Algollike notation for Lisp. The original version,
written by Vaughan Pratt at M.I.T. in the early 70s was written in
Maclisp. This new version is based on Common Lisp. CGOL was translated
to Common Lisp in the following fourstage process:
(1) cgol.tok, the tokenizer has been almost completely rewritten; (2)
cgol1.l, the main translation loop with library of translation schemas
has been converted from Maclisp to Common Lisp; (3) the code that
cgol1.l produces has been converted to Common Lisp; (4) selected
examples of CGOL programs themselves were rewritten, since certain
aspects of the semantics of Maclisp would otherwise not be modelled in
the expected fashion. Maclisp differs from Common Lisp in a variety of
respects, and some of them are apparent from CGOL including direct
escapes to Lisp, variable scoping, function definitions and numerous
other aspects.
In contrast to the programming described above, the major contribution
of this paper is annotation of selected code from the CGOL
translator.",
paper = "Phel92.pdf"
}
\end{chunk}
\index{Duran, Antonio J.}
\index{Perez, Mario}
\index{Varona, Juan L.}
\begin{chunk}{axiom.bib}
@article{Dura14,
author = "Duran, Antonio J. and Perez, Mario and Varona, Juan L.",
title = {{The Misfortunes of a Trio of Mathematicians Using Computer
Algebra Systems. Can We Trust in Them?}},
journal = "Notices of the AMS",
volume = "61",
number = "10",
year = "2014",
link = "\url{http://www.ams.org/notices/201410/rnoti-p1249.pdf}",
pages = "1249--1252",
paper = "Dura14.pdf",
keywords = "printed"
}
\end{chunk}

books/axiom.bib  942 ++++++++++++++++++++++++++
books/bookvolbib.pamphlet  1177 +++++++++++++++++++++++++++++++++
changelog  2 +
patch  1382 +++++++++++++++++++++++++
src/axiomwebsite/patches.html  2 +
5 files changed, 2931 insertions(+), 574 deletions()
diff git a/books/axiom.bib b/books/axiom.bib
index 0306710..c12b08c 100644
 a/books/axiom.bib
+++ b/books/axiom.bib
@@ 1288,7 +1288,7 @@ paper = "Brea89.pdf"
that the important programming features of ML cannot be added to
any impredicative language, such as the GirardReynolds calcus,
without implicitly assuming a type of all types.",
 paper = "Harp92a.pdf"
+ paper = "Harp93a.pdf"
}
@article{Hind69,
@@ 2296,8 +2296,9 @@ paper = "Brea89.pdf"
title = {{Towards a Theory of Type Structure}},
booktitle = "Colloquim on Programming",
year = "1974",
 pages = "911",
 paper = "Reyn74.pdf"
+ pages = "408425",
+ paper = "Reyn74.pdf",
+ keywords = "printed"
}
@inproceedings{Reyn80,
@@ 9182,6 +9183,91 @@ paper = "Brea89.pdf"
link = "\url{http://www.wolfram.com}"
}
+@misc{Anon18,
+ author = "Anonymous",
+ title = {{A Compiler From Scratch}},
+ link = "\url{https://www.destroyallsoftware.com/screencasts/catalog/acompilerfromscratch}",
+ year = "2018"
+}
+
+@article{Appe87,
+ author = "Appel, Andrew W. and MacQueen, David B.",
+ title = {{A Standard ML Compiler}},
+ journal = "LNCS",
+ volume = "274",
+ pages = "301324",
+ year = "1987",
+ abstract =
+ "Standard ML is a major revision of earlier dialects of the functional
+ language ML. We describe the first compiler written for Standard ML in
+ Standard ML. The compiler incorporates a number of novel features and
+ techniques, and is probably the largest system written to date in
+ Standard ML.
+
+ Great attention was paid to modularity in the construction of the
+ compiler, leading to a successful largescale test of the modular
+ capabilities of Standard ML. The front end is useful for purposes
+ other than compilation, and the back end is easily retargetable (we
+ have code generators for the VAX and MC68020). The module facilities
+ of Standard ML were taken into account early in the design of the
+ compiler, and they particularly influenced the environment management
+ component of the front end. For example, the symbol table structure is
+ designed for fast access to opened structures.
+
+ The front end of the compiler is a single phase that integrates
+ parsing, environment management, and type checking. The middle end
+ uses a sophisticated decision tree scheme to produce efficient
+ pattern matching code for functions and case expressions. The abstract
+ syntax produced by the front end is translated into a simple
+ lambdacalculusbased intermediate representation that lends itself to
+ easy case analysis and optimization in the code generator. Special
+ care was taken in designing the mntime data structures for fast
+ allocation and garbage collection.
+
+ We describe the overall organization of the compiler and present some
+ of the data representations and algorithms used in its various
+ phases. We conclude with some lessons learned about the ML language
+ itself and about compilers for modem functional languages.",
+ paper = "Appe87.pdf",
+ keywords = "printed"
+}
+
+@techreport{Appe88,
+ author = "Appel, Andrew W. and Jim, Trevor",
+ title = {{ContinuationPassing, ClosurePassing Style}},
+ institution = "Princston University",
+ year = "1988",
+ number = "CSTR18388",
+ abstract =
+ "We implemented a continuationpassing style (CPS) code generator for
+ ML. Our CPS language is represented as an ML datatype in which all
+ functions are named and most kinds of illformed expressions are
+ impossible. We separate the code generation into phases that rewrite
+ this representation into eversimpler forms. Closures are represented
+ explicitly as records, so that closure strategies can be communicated
+ from one phase to another. No stack is used. Our benchmark data shows
+ that the new method is an improvement over our previous,
+ abstractmachine based code generator.",
+ paper = "Appe88.pdf",
+ keywords = "printed"
+}
+
+@book{Appe92,
+ author = "Appel, Andrew W.",
+ title = {{Compiling with Continuations}},
+ year = "1992",
+ publisher = "Cambridge University Press",
+ isbn = "052103311X"
+}
+
+@book{Appe98,
+ author = "Appel, Andrew W.",
+ title = {{Modern Compiler Implementation in ML}},
+ year = "1998",
+ publisher = "Cambridge University Press",
+ isbn = "0521607647"
+}
+
@article{Back81,
author = "Back, R.J.R",
title = {{On Correct Refinement of Programs}},
@@ 9209,6 +9295,50 @@ paper = "Brea89.pdf"
keywords = "printed"
}
+@techreport{Barr96a,
+ author = "Barras, Bruno",
+ title = {{Coq en Coq}},
+ institution = "INRIA",
+ year = "1996",
+ type = "Research Report",
+ number = "inria00073667",
+ comment = "French",
+ abstract =
+ "The essential step of the formal verification of a proofchecker such
+ as Coq is the verification of its kernel: a typechecker for the
+ {\sl Calculus of Inductive Constructions} (CIC) whihc is its underlying
+ formalism. The present work is a first smallscale attempt on a
+ significative fragment of CIC: the Calculus of Constructions (CC). We
+ formalize the definition and the metatheory of (CC) in Coq. In
+ particular, we prove strong normalization and decidability of type
+ inference. From the latter proof, we extract a certified {\sl Caml
+ Light} program, which performs type inference (or typechecking) for
+ an arbitrary typing judgement in CC. Integrating this program in a
+ larger system, including a parser and prettyprinter, we obtain a
+ standalone proofchecker, called {\sl CoC}, the {\sl Calculus of
+ Constructions}. As an example, the formal proof of Newman's lemma,
+ build with Coq, can be reverified by {\sl CoC} with reasonable
+ performance.",
+ paper = "Barr96a.pdf"
+}
+
+@misc{Barr18,
+ author = "Barras, Bruno and Werner, Benjamin",
+ title = {{Coq in Coq}},
+ link =
+"\url{http://www.lix.polytechnique.fr/Labo/Bruno.Barras/publi/coqincoq.pdf}",
+ comment = "https://github.com/coqcontribs/coqincoq",
+ abstract =
+ "We formalize the definition and the metatheory of the Calculus of
+ Constructions (CC) using the proof assistant Coq. In particular,
+ we prove strong normalization and decidability of type
+ inference. From the latter proof, we extract a certified Objective
+ Caml program which performs type inference in CC and use this code
+ to build a smallscale certified proofchecker.",
+ paper = "Barr18.pdf",
+ keywords = "printed"
+}
+
@inproceedings{Bent07,
author = "Benton, Nick and Zarfaty, Uri",
title = {{Formalizing and Verifying Semantic Type Soundness of a
@@ 9228,6 +9358,13 @@ paper = "Brea89.pdf"
keywords = "printed"
}
+@misc{Bonn18,
+ author = "BonnaireSergeant, Ambrose",
+ title = {{Are unsound type systems wrong?}},
+ link =
+"\url{http://frenchy64.github.io/2018/04/07/unsoundnessinuntypedtypes.html}",
+}
+
@article{Boye75,
author = "Boyer, Robert S. and Moore, J Strother",
title = {{Proving Theorems About LISP Functions}},
@@ 9501,6 +9638,66 @@ paper = "Brea89.pdf"
pages = "182"
}
+@phdthesis{Dola16,
+ author = "Dolan, Stephen",
+ title = {{Algebraic Subtyping}},
+ school = "University of Cambridge",
+ year = "2016",
+ link = "\url{https://www.cl.cam.ac.uk/~sd601/thesis.pdf}",
+ abstract =
+ "Type inference gives programmers the benefit of static,
+ compiletime type checking without the cost of manually specifying
+ types, and has long been a standard feature of functional programming
+ languages. However, it has proven difficult to integrate type
+ inference with subtyping, since the unification engine at the core of
+ classical type inference accepts only equations, not subtyping
+ constraints.
+
+ This thesis presents a type system combining MLstyle parametric
+ polymorphism and subtyping, with type inference, principal types,
+ and decidable type subsumption. Type inference is based on {\sl
+ biunification} , an analogue of unification that works with
+ subtyping constraints.
+
+ Making this possible are several contributions, beginning with the
+ notion of an 'extensible' type system, in which an open world of
+ types is assumed, so that no typeable program becomes untypeable
+ by the addition of new types to the language. While previous
+ formulations of subtyping fail to be extensible, this thesis
+ shows that adopting a more algebraic approach can remedy this.
+ Using such an approach, this thesis develops the theory of
+ biunification, shows how it is used to infer types, and shows how
+ it can be efficiently implemented, exploiting deep connections
+ between the algebra of regular languages and polymorphic subtyping.",
+ paper = "Dola16.pdf"
+}
+
+@article{Dola17,
+ author = "Dolan, Stephen and Mycroft, Alan",
+ title = {{Polymorphism, Subtyping, and Type Inference in MLsub}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "52",
+ number = "1",
+ year = "2017",
+ pages = "6072",
+ link = "\url{https://www.cl.cam.ac.uk/~sd601/papers/mlsubpreprint.pdf}",
+ abstract =
+ "We present a type system combining subtyping and MLstyle parametric
+ polymorphism. Unlike previous work, our system supports type inference
+ and has compact principal types. We demonstrate this system in the
+ minimal language MLsub, which types a strict superset of core ML
+ programs.
+
+ This is made possible by keeping a strict separation between the types
+ used to describe inputs and those used to describe outputs, and
+ extending the classical unification algorithm to handle subtyping
+ constraints between these input and output types. Principal types are
+ kept compact by type simplification, which exploits deep connections
+ between subtyping and the algebra of regular languages. An
+ implementation is available online.",
+ paper = "Dola17.pdf"
+}
+
@misc{Domi18,
author = "Domipheus",
title = {{Designing a CPU in VHDL}},
@@ 9509,6 +9706,40 @@ paper = "Brea89.pdf"
abstract = "A VHDL CPU"
}
+@inproceedings{Down16,
+ author = "Downen, Paul and Maurer, Luke and Ariola, Zena M.",
+ title = {{Sequent Calculus as a Compiler Intermediate Language}},
+ booktitle = "ICFP'16",
+ year = "2016",
+ pages = "7488",
+ publisher = "ACM",
+ isbn = "9781450342193",
+ abstract =
+ "The $\lambda$calculus is popular as an intermediate language for
+ practical compilers. But in the world of logic it has a
+ lesserknown twin, born at the same time, called the {\sl sequent
+ calculus}. Perhaps that would make for a good intermediate
+ language, too? To explore this question we designed Sequent Core,
+ a practicallyoriented core calculus based on the sequent
+ calculus, and used it to reimplement a substantial chunk of the
+ Glasgow Haskell Compiler.",
+ paper = "Down16.pdf"
+}
+
+@article{Dura14,
+ author = "Duran, Antonio J. and Perez, Mario and Varona, Juan L.",
+ title = {{The Misfortunes of a Trio of Mathematicians Using Computer
+ Algebra Systems. Can We Trust in Them?}},
+ journal = "Notices of the AMS",
+ volume = "61",
+ number = "10",
+ year = "2014",
+ link = "\url{www.ams.org/notices/201410/rnotip1249.pdf}",
+ pages = "12491252",
+ paper = "Dura14.pdf",
+ keywords = "printed"
+}
+
@article{Fill03,
author = "Filliatre, JeanChristophe",
title = {{Verification of NonFunctional Programs using Interpretations
@@ 9568,6 +9799,16 @@ paper = "Brea89.pdf"
keywords = "printed"
}
+@misc{Flan03,
+ author = "Flanagan, Cormac and Sabry, Amr and Duba, Bruce F. and
+ Felleisen, Matthias",
+ title = {{The Essence of Compiling with Continuations}},
+ link =
+ "\url{https://www.cs.rice.edu/~javaplt/411/17spring/Readings/essenceretro.pdf}",
+ paper = "Flan03.pdf",
+ keywords = "printed"
+}
+
@article{Floy64,
author = "Floyd, Robert W.",
title = {{Algorithm 245: Treesort}},
@@ 9609,6 +9850,22 @@ paper = "Brea89.pdf"
paper = "Foxx03.pdf"
}
+@book{Fran92,
+ author = "Francez, Nissim",
+ title = {{Program Verification}},
+ year = "1992",
+ publisher = "AddisonWesley",
+ isbn = "0201416085"
+}
+
+@misc{Giar95,
+ author = "Girard, JeanYves",
+ title = {{Linear Logic: Its Syntax and Semantics}},
+ year = "1995",
+ paper = "Gira95.pdf",
+ keywords = "printed"
+}
+
@article{Glas78,
author = "Glasner, Ingrid and Loeckx, Jacquest",
title = {{A Calculus for Proving Properties of WhilePrograms}},
@@ 9730,6 +9987,45 @@ paper = "Brea89.pdf"
paper = "Gutt95.pdf"
}
+@misc{Hamm95,
+ author = "Hamming, Richard",
+ title = {{Hamming, 'You and Your Research'}},
+ link = "\url{https://www.youtube.com/watch?v=a1zDuOPkMSw}",
+ year = "1995"
+}
+
+@inproceedings{Harp93b,
+ author = "Harper, Robert and Lillibridge, Mark",
+ title = {{Explicit Polymorphism and CPS Conversion}},
+ booktitle = "Symp. of Principles of Programming Languages",
+ publisher = "ACM Press",
+ year = "1993",
+ pages = "206=219",
+ abstract =
+ "We study the typing properties of CPS conversion for an extension
+ of F$\omega$ with control operators. Two classes of evaluation
+ strategies are considered, each with callbyname and callbyvalue
+ variants. Under the 'standard' strategies, constructor abstractions
+ are values, and constructor applications can lead to nontrivial
+ control effects. In contrast, the 'MLlike' strategies evaluate
+ beneath constructor abstractions, reflecting the usual interpretation
+ of programs in languages based on implicit polymorphism. Three
+ continuation passing style sublanguages are considered, one on which
+ the standard strategies coincide, one on which the MLlike
+ strategies coincide, and one on which all the strategies coincide.
+ Compositional, typepreserving CPS transformation algorithms are
+ given for the standard strategies, resulting in terms on which all
+ evaluation strategies coincide. This has as a corollary the
+ soundness and termination of welltyped programs under the standard
+ evaluation strategies. A similar result is obtained for the MLlike
+ callbyname strategy . In contrast, such results are obtained for
+ the callbyvalue MLlike strategy only for a restricted
+ sublanguage in which constructor abstractions are limited to
+ values.",
+ paper = "Harp93b.pdf",
+ keywords = "printed"
+}
+
@incollection{Hoar72,
author = "Hoare, C.A.R",
title = {{Notes on Data Structuring}},
@@ 9739,6 +10035,70 @@ paper = "Brea89.pdf"
pages = "83174"
}
+@article{Huet75,
+ author = "Huet, Gerard P.",
+ title = {{A Unification Algorithm for typed $\lambda$Calculus}},
+ journal = "Theoretical Computer Science",
+ volume = "1",
+ number = "1",
+ pages = "2557",
+ year = "1975",
+ abstract =
+ "A semidecision algorithm is presented, to search for unification of
+ formulas in typed $\omega$order $\lambda$calculus, and its
+ correctness is proved.
+
+ It is shown that the search space is significantly smaller than the
+ one for finding the most general unifiers. In particluar, our search
+ is not redundant. This allows our algorithm to have good
+ directionality and convergence properties.",
+ paper = "Huet75.pdf"
+}
+
+@article{Huet78,
+ author = "Huet, Gerard P. and Lang, Bernard",
+ title = {{Proving and Applying Program Transformations Expressed
+ with SecondOrder Patterns}},
+ journal = "Acta Informatica",
+ volume = "11",
+ number = "1",
+ pages = "3155",
+ year = "1978",
+ abstract =
+ "We propose a program transformation method based on rewritingrules
+ composed of secondorder schemas. A complete secondorder matching
+ algorithm is presented that allows effective use of these rules. We
+ show how to formally prove the correctness of the rules using a
+ denotational semantics for the programming language. We establish the
+ correctness of the transformation method itself, and give techniques
+ pertaining to its actual implementation. The paper is illustrated with
+ recursion removal examples."
+}
+
+@inproceedings{Hugh90,
+ author = "Hughes, John",
+ title = {{Why Functional Programming Matters}},
+ booktitle = "Research Topics in Functional Programming",
+ publisher = "AddisonWesley",
+ year = "1990",
+ pages = "1742",
+ abstract =
+ "As software becomes more and more complex, it is more and more
+ important to structure it well. Wellstructured software is easy to
+ write and to debug, and provides a collection of modules that can be
+ reused to reduce future programming costs. In this paper we show that
+ two fea tures of functional languages in particular, higherorder
+ functions and lazy evaluation, can contribute significantly to
+ modularity. As examples, we manipulate lists and trees, program
+ several numerical algorithms, and implement the alphabeta heuristic
+ (an algorithm from Artificial Intelligence used in gameplaying
+ programs). We conclude that since modularity is the key to successful
+ programming, functional programming offers important advantages for
+ software development.",
+ paper = "Hugh90.pdf",
+ keywords = "printed"
+}
+
@article{Keim09,
author = "Keimel, Klaus and Plotkin, Gordon D.",
title = {{Predicate Transformers for Extended Probability and
@@ 9770,6 +10130,25 @@ paper = "Brea89.pdf"
paper = "Keim09.pdf"
}
+@inproceedings{Kran86,
+ author = "Kranz, David and Kelsey, Richard and Rees, Jonathan and
+ Hudak, Paul and Philbin, James and Adams, Norman",
+ title = {{ORBIT: An Optimizing Compiler for Scheme}},
+ booktitle = "SIGLAN '86",
+ publisher = "ACM",
+ pages = "219233",
+ year = "1986",
+ abstract =
+ "In this paper we describe an optimizing compiler for Scheme
+ called {\sl Orbit} that incorporates our experience with an
+ earlier Scheme compiler called TC, together with some ideas from
+ Steele's Rabbit compiler. The three main design goals have been
+ correctness, generating very efficient compiled code, and
+ portability.",
+ paper = "Kran86.pdf",
+ keywords = "printed"
+}
+
@article{Lamp81,
author = "Lamport, Leslie and Owicki, Susan",
title = {{Program Logics and Program Verification}},
@@ 9781,6 +10160,27 @@ paper = "Brea89.pdf"
keywords = "printed"
}
+@article{Lero09,
+ author = "Leroy, Xavier",
+ title = {{A Formally Verified Compiler Backend}},
+ journal = "Logic in Computer Science",
+ volume = "43",
+ number = "4",
+ pages = "363446",
+ year = "2009",
+ abstract =
+ "This article describes the development and formal verification
+ (proof of semantic preservation) of a compiler backend from Cminor
+ (a simple imperative intermediate language) to PowerPC assembly code,
+ using the Coq proof assistant both for programming the compiler and
+ for proving its soundness. Such a verified compiler is useful in the
+ context of formal methods applied to the certification of critical
+ software: the verification of the compiler guarantees that the safety
+ properties proved on the source code hold for the executable compiled
+ code as well.",
+ paper = "Lero09.pdf"
+}
+
@article{Mano03,
author = "Manolios, Panagiotis and Moore, J Strother",
title = {{Partial Functions in ACL2}},
@@ 9807,6 +10207,86 @@ paper = "Brea89.pdf"
keywords = "printed"
}
+@inproceedings{Mcdo97,
+ author = "McDowell, Raymond and Miller, Dale",
+ title = {{A Logic for Reasoning with HigherOrder Abstract Syntax}},
+ booktitle = "LICS'97",
+ publisher = "IEEE",
+ year = "1997",
+ link = "\url{http://www.lix.polytechnique.fr/~dale/papers/lics97.pdf}",
+ abstract =
+ "Logical frameworks based on intuitionistic or linear logics with
+ highertype quantification have been successfully used to give
+ highlevel, modular, and formal specifications of many important
+ judgments in the area of programming languages and inference
+ systems. Given such specifications, it is natural to consider proving
+ properties about the specified systems in the framework: for example,
+ given the specification of evaluation for a functional programming
+ language, prove that the language is deterministic or that the
+ subjectreduction theorem holds. One challenge in developing a
+ framework for such reasoning is that higherorder abstract syntax
+ (HOAS), an elegant and declarative treatment of objectlevel
+ abstraction and substitution, is difficult to treat in proofs
+ involving induction. In this paper, we present a metalogic that can
+ be used to reason about judgments coded using HOAS; this metalogic is
+ an extension of a simple intuitionistic logic that admits higherorder
+ quantification over simply typed $\lambda$terms (key ingredients for
+ HOAS) as well as induction and a notion of definition. The latter
+ concept of a definition is a prooftheoretic device that allows
+ certain theories to be treated as 'closed' or as defining fixed
+ points. The resulting metalogic can specify various logical
+ frameworks and a large range of judgments regarding programming
+ languages and inference systems. We illustrate this point through
+ examples, including the admissibility of cut for a simple logic and
+ subject reduction, determinacy of evaluation, and the equivalence of
+ SOS and natural semantics presentations of evaluation for a simple
+ functional programming language.",
+ paper = "Mcdo97.pdf"
+}
+
+@article{Mcdo02,
+ author = "McDowell, Raymond and Miller, Dale",
+ title = {{Reasoning with HigherOrder Abstract Syntax in a Logical
+ Framework}},
+ journal = "ACM Trans. Computational Logic",
+ volume = "3",
+ year = "2002",
+ pages = "80136",
+ link = "\url{http://www.lix.polytechnique.fr/~dale/papers/mcdowell01.pdf}",
+ abstract =
+ "Logical frameworks based on intuitionistic or linear logics with
+ highertype quantification have been successfully used to give
+ highlevel, modular, and formal specifications of many important
+ judgments in the area of programming languages and inference systems.
+ Given such specifications, it is natural to consider proving
+ properties about the specified systems in the framework: for example,
+ given the specification of evaluation for a functional programming
+ language, prove that the language is deterministic or that evaluation
+ preserves types. One challenge in developing a framework for such
+ reasoning is that higherorder abstract syntax (HOAS), an elegant and
+ declarative treatment of objectlevel abstraction and substitution, is
+ difficult to treat in proofs involving induction. In this paper, we
+ present a metalogic that can be used to reason about judgments coded
+ using HOAS; this metalogic is an extension of a simple intuitionistic
+ logic that admits higherorder quantification over simply typed
+ $\lambda$terms (key ingredients for HOAS) as well as induction and a
+ notion of definition. The latter concept of definition is a
+ prooftheoretic device that allows certain theories to be treated as
+ 'closed' or as defining fixed points. We explore the difficulties of
+ formal metatheoretic analysis of HOAS encodings by considering
+ encodings of intuitionistic and linear logics, and formally derive the
+ admissibility of cut for important subsets of these logics. We then
+ propose an approach to avoid the apparent tradeoff between the
+ benefits of higherorder abstract syntax and the ability to analyze
+ the resulting encodings. We illustrate this approach through examples
+ involving the simple functional and imperative programming languages
+ $PCF$ and $PCF_{:=}$. We formally derive such properties as unicity of
+ typing, subject reduction, determinacy of evaluation, and the
+ equivalence of transition semantics and natural semantics
+ presentations of evaluation.",
+ paper = "Mcdo02.pdf"
+}
+
@book{Morg98,
author = "Morgan, Carroll",
title = {{Programming from Specifcations, 2nd Ed.}},
@@ 9817,6 +10297,36 @@ paper = "Brea89.pdf"
paper = "Morg98.pdf"
}
+@inproceedings{Morr99,
+ author = "Morrisett, Greg and Walker, David and Crary, Karl and Glew, Neil",
+ title = {{From System F to Typed Assembly Language}},
+ booktitle = "Trans. on Progamming Languages and Systems TOPLAS",
+ volume = "21",
+ number = "3",
+ year = "1999",
+ pages = "527568",
+ abstract =
+ "We motivate the design of typed assembly language (TAL) and present a
+ typepreserving ttranslation from Systemn F to TAL. The typed assembly
+ language we pressent is based on a conventional RISC assembly
+ language, but its static type sytem provides support for enforcing
+ highlevel language abstratctions, such as closures, tuples, and
+ userdefined abstract data types. The type system ensures that
+ welltyped programs cannot violatet these abstractionsl In addition,
+ the typing constructs admit many lowlevel compiler optimiztaions. Our
+ translation to TAL is specified as a sequence of typepreserving
+ transformations, including CPS and closure conversion phases;
+ typecorrect source programs are mapped to typecorrect assembly
+ language. A key contribution is an approach to polymorphic closure
+ conversion that is considerably simpler than previous work. The
+ compiler and typed assembly lanugage provide a fully automatic way to
+ produce certified code, suitable for use in systems where unstrusted
+ and potentially malicious code must be checked for safety before
+ execution.",
+ paper = "Morr99.pdf",
+ keywords = "printed"
+}
+
@article{Myre14,
author = "Myreen, Magnus O. and Davis, Jared",
title = {{The Reflective Milawa Theorem Prover is Sound}},
@@ 9989,6 +10499,242 @@ paper = "Brea89.pdf"
keywords = "printed"
}
+@book{Paul96,
+ author = "Paulson, L.C.",
+ title = {{ML for the Working Programmer 2nd Edition}},
+ year = "1996",
+ publisher = "Cambridge University Press",
+ isbn = "052156543X"
+}
+
+@inproceedings{Peyt93,
+ author = "PeytonJones, Simon and Wadler, Philip",
+ title = {{Imperative Functional Programming}},
+ booktitle = "Principles of Programming Languages POPL'93",
+ publisher = "ACM",
+ year = "1993",
+ pages = "71--84",
+ abstract =
+ "We present a new model, based on monads, for performing
+ input/output in a nonstrict, purely functional language. It is
+ composable, extensible, efficient, requires no extensions to the
+ type system, and extends smoothly to incorporate mixedlanguage
+ working and inplace array updates.",
+ paper = "Peyt93.pdf"
+}
+
+@misc{Peyt17,
+ author = "PeytonJones, Simon",
+ title = {{Escape from the ivory tower: the Haskell journey}},
+ link = "\url{https://www.youtube.com/watch?v=re96UgMk6GQ}",
+ year = "2017"
+}
+
+@inproceedings{Pfen88a,
+ author = "Pfenning, Frank and Elliott, Conal",
+ title = {{HigherOrder Abstract Syntax}},
+ booktitle = "Symp on Language Design and Implementation PLDI'88",
+ publisher = "ACM",
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/pldi88.pdf}",
+ year = "1988",
+ abstract =
+ "We describe motivation, design, use, and implementation of
+ higherorder abstract syntax as a central representation for
+ programs, formulas, rules, and other syntactic objects in program
+ manipulation and other formal systems where matching and substitution
+ or unification are central operations. Higherorder abstract syntax
+ incorporates name binding information in a uniform and language
+ generic way. Thus it acts as a powerful link integrating diverse
+ tools in such formal environments. We have implemented higherorder
+ abstract syntax, a supporting matching and unification algorithm, and
+ some clients in Common Lisp in the framework of the Ergo project at
+ Carnegie Mellon University.",
+ paper = "Pfen88a.pdf",
+ keywords = "printed"
+}
+
+@misc{Phel92,
+ author = "Phelps, Tom",
+ title = {{A Common Lisp CGOL}},
+ year = "1992",
+ link = "\url{https://people.eecs.berkeley.edu/~fateman/cgol/cgol.1/cgol.ps}",
+ abstract =
+ "CGOL is an Algollike notation for Lisp. The original version,
+ written by Vaughan Pratt at M.I.T. in the early 70s was written in
+ Maclisp. This new version is based on Common Lisp. CGOL was translated
+ to Common Lisp in the following fourstage process:
+
+ (1) cgol.tok, the tokenizer has been almost completely rewritten; (2)
+ cgoll.l, the main translation loop with library of translation schemas
+ has been converted from Maclisp to Common Lisp; (3) the code that
+ cgol1.l produces has been converted to Common Lisp; (4) selected
+ examples of CGOL programs themselves were rewritten, since certain
+ aspects of the semantics of Maclisp would otherwise not be modelled in
+ the expected fashion. Maclisp differs from Common Lisp in a variety of
+ respects, and some of them are apparent from CGOL including direct
+ escapes to Lisp, variable scoping, function definitions and numerous
+ other aspects.
+
+ In contrast to the programming described above, the major contribution
+ of this paper is annotation of selected code from the CGOL
+ translator.",
+ paper = "Phel92.pdf"
+}
+
+@phdthesis{Poll94,
+ author = "Pollack, Robert",
+ title = {{The Theory of LEGO  A Proof Checker for the Extended Calculus
+ of Constructions}},
+ school = "University of Edinburgh",
+ link =
+ "\url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.38.2610}",
+ year = "1994",
+ abstract =
+ "LEGO is a computer program for interactive typechecking in the
+ Extended Calculus of Constructions and two of its subsystems. LEGO
+ also supports the extension of these three systems with inductive
+ types. These type systems can be viewed as logics, and as meta
+ languages for expressing logics, and LEGO is intended to be used for
+ interactively constructing proofs in mathematical theories presented
+ in these logics. I have developed LEGO over six years, starting from
+ an implementation of the Calculus of Constructions by Gerard
+ Huet. LEGO has been used for problems at the limits of our abilities
+ to do formal mathematics.
+
+ In this thesis I explain some aspects of the metatheory of LEGO’s
+ type systems leading to a machinechecked proof that typechecking is
+ decidable for all three type theories supported by LEGO, and to a
+ verified algorithm for deciding their typing judgements, assuming only
+ that they are normalizing. In order to do this, the theory of Pure
+ Type Systems (PTS) is extended and formalized in LEGO. This extended
+ example of a formally developed body of mathematics is described, both
+ for its main theorems, and as a case study in formal mathematics. In
+ many examples, I compare formal definitions and theorems with their
+ informal counterparts, and with various alternative approaches, to
+ study the meaning and use of mathematical language, and suggest
+ clarifications in the informal usage.
+
+ Having outlined a formal development far too large to be surveyed in
+ detail by a human reader, I close with some thoughts on how the human
+ mathematician’s state of understanding and belief might be affected by
+ possessing such a thing.",
+ paper = "Poll94.pdf"
+}
+
+@article{Poll94a,
+ author = "Pollack, Robert",
+ title = {{On Extensibility of Proof Checkers}},
+ journal = "LNCS",
+ volume = "996",
+ pages = "140--161",
+ year = "1994",
+ abstract =
+ "My suggestion is little different from LCF, just replacing one
+ computational meta language (ML) with another (ECC, FS0,...). The
+ philosophical point is that it is then possible to accept non
+ canonical proof notations as object level proofs, removing the need to
+ actually normalize them. There are problems to be worked out in
+ practice, such as extraction of programs from constructive proof, and
+ efficient execution of pure, total programs. Although this approach
+ doesn't address the difficulty of proving correctness of tactics in
+ the meta level, it is immediately useful for tactics with structural
+ justification (e.g. weakening) which are not even representable in
+ LCF, and are infeasible in the Nuprl variant of LCF. Since it can be
+ used for any object system without adding new principles such as
+ reflection, and is compatible with other approaches to extensibility
+ (especially partial reflection), it should be considered as part of
+ the answer to extensibility in proof checkers.",
+ paper = "Poll94a.pdf"
+}
+
+@misc{Remy17,
+ author = "Remy, Didier",
+ title = {{Type Systems for Programming Languages}},
+ year = "2017",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours1.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours2.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours3.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours4.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours5.pdf}",
+ paper = "Remy17.tgz"
+}
+
+@techreport{Sabr92,
+ author = "Sabry, Amr and Felleisen, Matthias",
+ title = {{Reasoning About Programs in ContinuationPassing Style}},
+ type = "technical report",
+ number = "TR 92180",
+ institution = "Rice University",
+ abstract =
+ "Plotkin's $\lambda$value calculus is sound but incomplete for
+ reasoning about $\beta\nu$transformations on programs in continuation
+ passing style (CPS). To find a complete extension, we define a new,
+ compactifying CPS transformation and an 'inverse' mapping,
+ $un$CPS, both of which are interesting in their own right. Using the
+ new CPS transformation, we can determine the precise language of CPS
+ terms closed under $\beta\nu$ transformations. Using the $un$CPS
+ transformation, we can derive a set of axioms such that every equation
+ between source programs is provable if and only if $\beta\nu$ can
+ prove the corresponding equation between CPS programs. The extended
+ calculus is equivalent to an untyped variant of Moggi's computational
+ $\lambda$calculus.",
+ paper = "Sabr92.pdf",
+ keywords = "printed"
+}
+
+@techreport{Stee78,
+ author = "Steele, Guy Lewis",
+ title = {{RABBIT: A Compiler for SCHEME}},
+ institution = "MIT",
+ type = "technical report",
+ number = "AITR474",
+ year = "1978",
+ abstract =
+ "We have developed a compiler for the lexicallyscoped dialect of LISP
+ known as SCHEME. The compiler knows relatively little about specific
+ data manipulation primitives such as arithmetic operators, but
+ concentrates on general issues of environment and control. Rather than
+ having specialized knowledge about a large variety of control and
+ environment constructs, the compiler handles only a small basis set
+ which reflects the semantics of lambdacalculus. All of the
+ traditional imperative constructs, such as sequencing, assignment,
+ looping, GOTO, as well as many standard LISP constructs such as AND,
+ OR, and COND, are expressed in macros in terms of the applicative
+ basis set. A small number of optimization techniques, coupled with the
+ treatment of function calls as GOTO statements, serve to produce code
+ as good as that produced by more traditional compilers. The macro
+ approach enables speedy implementation of new constructs as desired
+ without sacrificing efficiency in the generated code.
+
+ A fair amount of analysis is devoted to determining whether
+ environments may be stackallocated or must be heapallocated.
+ Heapallocated environments are necessary in general because SCHEME
+ (unlike Algol 60 and Algol 68, for example) allows procedures with
+ free lexically scoped variables to be returned as the values of other
+ procedures; the Algol stackallocation environment strategy does not
+ suffice. The methods used here indicate that a heapallocating
+ generalization of the 'display' technique leads to an efficient
+ implementation of such 'upward funargs'. Moreover, compiletime
+ optimization and analysis can eliminate many 'funargs' entirely, and
+ so far fewer environment structures need be allocated at run time than
+ might be expected.
+
+ A subset of SCHEME (rather than triples, for example) serves as the
+ representation intermediate between the optimized SCHEME code and the
+ final output code; code is expressed in this subset in the socalled
+ continuationpassing style. As a subset of SCHEME, it enjoys the same
+ theoretical properties; one could even apply the same optimizer used
+ on the input code to the intermediate code. However, the subset is so
+ chosen that all temporary quantities are made manifest as variables,
+ and no control stack is needed to evaluate it. As a result, this
+ apparently applicative representation admits an imperative
+ interpretation which permits easy transcription to final imperative
+ machine code. These qualities suggest that an applicative language
+ like SCHEME is a better candidate for an UNCOL than the more
+ imperative candidates proposed to date.",
+ paper = "Stee78.pdf"
+}
+
@inproceedings{Talp92,
author = "Talpin, JeanPierre and Jouvelot, Pierre",
title = {{The Type and Effect Discipline}},
@@ 10017,25 +10763,154 @@ paper = "Brea89.pdf"
keywords = "printed"
}
@article{Lero09,
 author = "Leroy, Xavier",
 title = {{A Formally Verified Compiler Backend}},
 journal = "Logic in Computer Science",
 volume = "43",
 number = "4",
 pages = "363446",
 year = "2009",
+@techreport{Tard90,
+ author = "Tarditi, David and Acharya, Anurag and Lee, Peter",
+ title = {{No Assembly Required: Compiling Standard ML to C}},
+ type = "technical report",
+ institution = "Carnegie Mellon University",
+ number = "CMUCS90187",
+ year = "1990",
abstract =
 "This article describes the development and formal verification
 (proof of semantic preservation) of a compiler backend from Cminor
 (a simple imperative intermediate language) to PowerPC assembly code,
 using the Coq proof assistant both for programming the compiler and
 for proving its soundness. Such a verified compiler is useful in the
 context of formal methods applied to the certification of critical
 software: the verification of the compiler guarantees that the safety
 properties proved on the source code hold for the executable compiled
 code as well.",
 paper = "Lero09.pdf"
+ "C has been proposed as a portable target language for
+ implementing higherorder languages. Previous efforts at compiling
+ such languages to C have produced efficient code, but have had to
+ compromise on either the portability or the preservation of the
+ tailrecursive properties of the languages. We assert that neither
+ of these compromises is necessary for the generation of efficient
+ code. We offer a Standard ML to C compiler, which does not make
+ either of these compromises, as an existence proof. The generated
+ code achieves an execution speed that is just a factor of two
+ slower than the best native code compiler. In this paper, we
+ describe the design, implementation and the performance of this
+ compiler.",
+ paper = "Tard90.pdf",
+ keywords = "printed"
+}
+
+@misc{Tuho18,
+ author = "Tuhola, Henri",
+ title = {{An another MLsub study}},
+ link = "\url{http://boxbase.org/entries/2018/mar/12/mlsubstudy/}",
+ year = "2018",
+ comment = "Subtyping does not solve the problem of coercion"
+}
+
+@misc{Tuho18a,
+ author = "Tuhola, Henri",
+ title = {{Boxbase  Index}},
+ link = "\url{http://boxbase.org/}",
+ year = "2018",
+ keywords = "axiomref"
+}
+
+\subsection{U} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{V} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{W} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Wadler, Philip}
+\begin{chunk}{axiom.bib}
+@article{Wadl92,
+ author = "Wadler, Philip",
+ title = {{Comprehending Monads}},
+ journal = "Mathematical Structures in Computer Science",
+ volume = "2",
+ pages = "461--493",
+ year = "1992",
+ abstract =
+ "Category theorists invented {\sl monads} in the 1960s to
+ concisely express certain aspects of universal algebra. Functional
+ programmers invented {\sl list comprehensions} in the 1970s to
+ concisely express certain programs involving lists. This paper
+ shows how list comprehensions may be generalised to an arbitrary
+ monad, and how the resulting programming feature can concisely
+ express in a pure functional language some programs that
+ manipulate state, handle exceptions, parse text, or invoke
+ continuations. A new solution to the old problem of destructive
+ array update is also presented. No knowledge of category theory is
+ assumed.",
+ paper = "Wadl92.pdf"
+}
+
+@article{Wadl07,
+ author = "Wadler, Philip",
+ title = {{The GirardReynolds isomorphism (second edition)}},
+ journal = "Theoretical Computer Science",
+ volume = "375",
+ pages = "201--226",
+ year = "2007",
+ abstract =
+ "Jean-Yves Girard and John Reynolds independently discovered the
+ secondorder polymorphic lambda calculus, F2. Girard additionally
+ proved a Representation Theorem : every function on natural numbers
+ that can be proved total in secondorder intuitionistic predicate
+ logic, P2, can be represented in F2. Reynolds additionally proved an
+ Abstraction Theorem : every term in F2 satisfies a suitable notion of
+ logical relation; and formulated a notion of parametricity satisfied
+ by wellbehaved models.
+
+ We observe that the essence of Girard’s result is a projection from P2
+ into F2, and that the essence of Reynolds’s result is an embedding of
+ F2 into P2, and that the Reynolds embedding followed by the Girard
+ projection is the identity. We show that the inductive naturals are
+ exactly those values of type natural that satisfy Reynolds’s notion of
+ parametricity, and as a consequence characterize situations in which
+ the Girard projection followed by the Reynolds embedding is also the
+ identity.
+
+ An earlier version of this paper used a logic over untyped terms. This
+ version uses a logic over typed term, similar to ones considered by
+ Abadi and Plotkin and by Takeuti, which better clarifies the
+ relationship between F2 and P2.",
+ paper = "Wadl07.pdf"
+}
+
+@phdthesis{Zeil09,
+ author = "Zeilberger, Noam",
+ title = {{The Logical Basis of Evaluation Order and PatternMatching}},
+ school = "Carnegie Mellon University",
+ year = "2009",
+ link = "\url{https://www.cs.cmu.edu/~rwh/theses/zeilberger.pdf}",
+ abstract =
+ "An old and celebrated analogy says that writing programs is like
+ proving theorems. This analogy has been productive in both
+ directions, but in particular has demonstrated remarkable utility in
+ driving progress in programming languages, for example leading towards
+ a better understanding of concepts such as abstract data types and
+ polymorphism. One of the best known instances of the analogy actually
+ rises to the level of an isomorphism: between Gentzen’s natural
+ deduction and Church’s lambda calculus. However, as has been
+ recognized for a while, lambda calculus fails to capture some of the
+ important features of modern programming languages. Notably, it does
+ not have an inherent notion of evaluation order, needed to make sense
+ of programs with side effects. Instead, the historical descendents of
+ lambda calculus (languages like Lisp, ML, Haskell, etc.) impose
+ evaluation order in an ad hoc way.
+
+ This thesis aims to give a fresh take on the proofsasprograms
+ analogy—one which better accounts for features of modern programming
+ languages—by starting from a different logical foundation. Inspired
+ by Andreoli’s focusing proofs for linear logic, we explain how to
+ axiomatize certain canonical forms of logical reasoning through a
+ notion of pattern. Propositions come with an intrinsic polarity,
+ based on whether they are defined by patterns of proof, or by patterns
+ of refutation. Applying the analogy, we then obtain a programming
+ language with builtin support for patternmatching, in which
+ evaluation order is explicitly reflected at the level of types—and
+ hence can be controlled locally, rather than being an ad hoc, global
+ policy decision. As we show, different forms of continuationpassing
+ style (one of the historical tools for analyzing evaluation order)
+ can be described in terms of different polarizations. This language
+ provides an elegant, uniform account of both untyped and
+ intrinsicallytyped computation (incorporating ideas from infinitary
+ proof theory), and additionally, can be provided an extrinsic type
+ system to express and statically enforce more refined properties of
+ programs. We conclude by using this framework to explore the theory of
+ typing and subtyping for intersection and union types in the presence
+ of effects, giving a simplified explanation of some of the unusual
+ artifacts of existing systems.",
+ paper = "Zeil09.pdf"
}
@incollection{Soze06,
@@ 19319,7 +20194,7 @@ paper = "Brea89.pdf"
date = {20060526}
}
 @book{Flan03,
+@book{Flan03a,
author = "Flanders, Harley",
title = {{Differential Forms with Applications to the Physical Sciences}},
year = "2003",
@@ 19680,6 +20555,27 @@ paper = "Brea89.pdf"
is not as elegant as that of the unification techniques."
}
+@article{Bake91,
+ author = "Baker, Henry G.",
+ title = {{Pragmatic Parsing in Common Lisp}},
+ journal = "ACM Lisp Pointers",
+ volume = "IV",
+ number = "2",
+ pages = "3--15",
+ year = "1991",
+ abstract =
+ "We review META, a classic technique for building recursive descent
+ parsers, that is both simple and efficient. While META does not
+ handle all possible regular or contextfree grammars, it handles a
+ surprisingly large fraction of the grammars encountered by Lisp
+ programmers. We show how META can be used to parse streams, strings
+ and lists—including Common Lisp's hairy lambda expression parameter
+ lists. Finally, we compare the execution time of this parsing method
+ to the builtin methods of Common Lisp.",
+ paper = "Bake91.pdf",
+ keywords = "printed"
+}
+
@book{Basu06,
author = "Basu, Saugata and Pollack, Richard and
Roy, MarieFrancoise",
@@ 39297,7 +40193,7 @@ paper = "Brea89.pdf"
year = "1973",
link = "\url{http://hall.org.ua/halls/wizzard/pdf/Vaughan.Pratt.TDOP.pdf}",
paper = "Prat73.pdf",
 keywords = "axiomref"
+ keywords = "axiomref, printed"
}
@book{Pres07,
diff git a/books/bookvolbib.pamphlet b/books/bookvolbib.pamphlet
index d365b2a..909e1bb 100644
 a/books/bookvolbib.pamphlet
+++ b/books/bookvolbib.pamphlet
@@ 1918,7 +1918,7 @@ paper = "Brea89.pdf"
that the important programming features of ML cannot be added to
any impredicative language, such as the GirardReynolds calcus,
without implicitly assuming a type of all types.",
 paper = "Harp92a.pdf"
+ paper = "Harp93a.pdf"
}
\end{chunk}
@@ 3223,8 +3223,9 @@ paper = "Brea89.pdf"
title = {{Towards a Theory of Type Structure}},
booktitle = "Colloquium on Programming",
year = "1974",
 pages = "911",
 paper = "Reyn74.pdf"
+ pages = "408425",
+ paper = "Reyn74.pdf",
+ keywords = "printed"
}
\end{chunk}
@@ 12196,6 +12197,116 @@ when shown in factored form.
\section{Proving Axiom Correct  Spring 2018}
+\subsection{A} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\begin{chunk}{axiom.bib}
+@misc{Anon18,
+ author = "Anonymous",
+ title = {{A Compiler From Scratch}},
+ link = "\url{https://www.destroyallsoftware.com/screencasts/catalog/acompilerfromscratch}",
+ year = "2018"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\index{MacQueen, David B.}
+\begin{chunk}{axiom.bib}
+@article{Appe87,
+ author = "Appel, Andrew W. and MacQueen, David B.",
+ title = {{A Standard ML Compiler}},
+ journal = "LNCS",
+ volume = "274",
+ pages = "301--324",
+ year = "1987",
+ abstract =
+ "Standard ML is a major revision of earlier dialects of the functional
+ language ML. We describe the first compiler written for Standard ML in
+ Standard ML. The compiler incorporates a number of novel features and
+ techniques, and is probably the largest system written to date in
+ Standard ML.
+
+ Great attention was paid to modularity in the construction of the
+ compiler, leading to a successful largescale test of the modular
+ capabilities of Standard ML. The front end is useful for purposes
+ other than compilation, and the back end is easily retargetable (we
+ have code generators for the VAX and MC68020). The module facilities
+ of Standard ML were taken into account early in the design of the
+ compiler, and they particularly influenced the environment management
+ component of the front end. For example, the symbol table structure is
+ designed for fast access to opened structures.
+
+ The front end of the compiler is a single phase that integrates
+ parsing, environment management, and type checking. The middle end
+ uses a sophisticated decision tree scheme to produce efficient
+ pattern matching code for functions and case expressions. The abstract
+ syntax produced by the front end is translated into a simple
+ lambdacalculusbased intermediate representation that lends itself to
+ easy case analysis and optimization in the code generator. Special
+ care was taken in designing the run-time data structures for fast
+ allocation and garbage collection.
+
+ We describe the overall organization of the compiler and present some
+ of the data representations and algorithms used in its various
+ phases. We conclude with some lessons learned about the ML language
+ itself and about compilers for modern functional languages.",
+ paper = "Appe87.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\index{Jim, Trevor}
+\begin{chunk}{axiom.bib}
+@techreport{Appe88,
+ author = "Appel, Andrew W. and Jim, Trevor",
+ title = {{ContinuationPassing, ClosurePassing Style}},
+ institution = "Princeton University",
+ year = "1988",
+ number = "CSTR18388",
+ abstract =
+ "We implemented a continuationpassing style (CPS) code generator for
+ ML. Our CPS language is represented as an ML datatype in which all
+ functions are named and most kinds of illformed expressions are
+ impossible. We separate the code generation into phases that rewrite
+ this representation into eversimpler forms. Closures are represented
+ explicitly as records, so that closure strategies can be communicated
+ from one phase to another. No stack is used. Our benchmark data shows
+ that the new method is an improvement over our previous,
+ abstractmachine based code generator.",
+ paper = "Appe88.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\begin{chunk}{axiom.bib}
+@book{Appe92,
+ author = "Appel, Andrew W.",
+ title = {{Compiling with Continuations}},
+ year = "1992",
+ publisher = "Cambridge University Press",
+ isbn = "052103311X"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\begin{chunk}{axiom.bib}
+@book{Appe98,
+ author = "Appel, Andrew W.",
+ title = {{Modern Compiler Implementation in ML}},
+ year = "1998",
+ publisher = "Cambridge University Press",
+ isbn = "0521607647"
+}
+
+\end{chunk}
+
+\subsection{B} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Back, R.J.R}
\begin{chunk}{axiom.bib}
@article{Back81,
@@ 12227,6 +12338,59 @@ when shown in factored form.
\end{chunk}
+\index{Barras, Bruno}
+\begin{chunk}{axiom.bib}
+@techreport{Barr96a,
+ author = "Barras, Bruno",
+ title = {{Coq en Coq}},
+ institution = "INRIA",
+ year = "1996",
+ type = "Research Report",
+ number = "inria00073667",
+ comment = "French",
+ abstract =
+ "The essential step of the formal verification of a proofchecker such
+ as Coq is the verification of its kernel: a typechecker for the
+ {\sl Calculus of Inductive Constructions} (CIC) which is its underlying
+ formalism. The present work is a first smallscale attempt on a
+ significative fragment of CIC: the Calculus of Constructions (CC). We
+ formalize the definition and the metatheory of (CC) in Coq. In
+ particular, we prove strong normalization and decidability of type
+ inference. From the latter proof, we extract a certified {\sl Caml
+ Light} program, which performs type inference (or typechecking) for
+ an arbitrary typing judgement in CC. Integrating this program in a
+ larger system, including a parser and prettyprinter, we obtain a
+ standalone proofchecker, called {\sl CoC}, the {\sl Calculus of
+ Constructions}. As an example, the formal proof of Newman's lemma,
+ build with Coq, can be reverified by {\sl CoC} with reasonable
+ performance.",
+ paper = "Barr96a.pdf"
+}
+
+\end{chunk}
+
+\index{Barras, Bruno}
+\index{Werner, Benjamin}
+\begin{chunk}{axiom.bib}
+@misc{Barr18,
+ author = "Barras, Bruno and Werner, Benjamin",
+ title = {{Coq in Coq}},
+ link =
+"\url{http://www.lix.polytechnique.fr/Labo/Bruno.Barras/publi/coqincoq.pdf}",
+ comment = "https://github.com/coqcontribs/coqincoq",
+ abstract =
+ "We formalize the definition and the metatheory of the Calculus of
+ Constructions (CC) using the proof assistant Coq. In particular,
+ we prove strong normalization and decidability of type
+ inference. From the latter proof, we extract a certified Objective
+ Caml program which performs type inference in CC and use this code
+ to build a smallscale certified proofchecker.",
+ paper = "Barr18.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Benton, Nick}
\index{Zarfaty, Uri}
\begin{chunk}{axiom.bib}
@@ 12251,6 +12415,17 @@ when shown in factored form.
\end{chunk}
+\index{BonnaireSergeant, Ambrose}
+\begin{chunk}{axiom.bib}
+@misc{Bonn18,
+ author = "BonnaireSergeant, Ambrose",
+ title = {{Are unsound type systems wrong?}},
+ link =
+"\url{http://frenchy64.github.io/2018/04/07/unsoundnessinuntypedtypes.html}",
+}
+
+\end{chunk}
+
\index{Boyer, Robert S.}
\index{Moore, J Strother}
\begin{chunk}{axiom.bib}
@@ 12277,6 +12452,8 @@ when shown in factored form.
\end{chunk}
+\subsection{C} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Caldwell, James L.}
\begin{chunk}{axiom.bib}
@inproceedings{Cald97,
@@ 12437,6 +12614,8 @@ when shown in factored form.
\end{chunk}
+\subsection{D} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Dahl, OleJohan}
\index{Hoare, C.A.R}
\begin{chunk}{axiom.bib}
@@ 12579,6 +12758,75 @@ when shown in factored form.
\end{chunk}
+\index{Dolan, Stephen}
+\begin{chunk}{axiom.bib}
+@phdthesis{Dola16,
+ author = "Dolan, Stephen",
+ title = {{Algebraic Subtyping}},
+ school = "University of Cambridge",
+ year = "2016",
+ link = "\url{https://www.cl.cam.ac.uk/~sd601/thesis.pdf}",
+ abstract =
+ "Type inference gives programmers the benefit of static,
+ compiletime type checking without the cost of manually specifying
+ types, and has long been a standard feature of functional programming
+ languages. However, it has proven difficult to integrate type
+ inference with subtyping, since the unification engine at the core of
+ classical type inference accepts only equations, not subtyping
+ constraints.
+
+ This thesis presents a type system combining MLstyle parametric
+ polymorphism and subtyping, with type inference, principal types,
+ and decidable type subsumption. Type inference is based on {\sl
+ biunification} , an analogue of unification that works with
+ subtyping constraints.
+
+ Making this possible are several contributions, beginning with the
+ notion of an 'extensible' type system, in which an open world of
+ types is assumed, so that no typeable program becomes untypeable
+ by the addition of new types to the language. While previous
+ formulations of subtyping fail to be extensible, this thesis
+ shows that adopting a more algebraic approach can remedy this.
+ Using such an approach, this thesis develops the theory of
+ biunification, shows how it is used to infer types, and shows how
+ it can be efficiently implemented, exploiting deep connections
+ between the algebra of regular languages and polymorphic subtyping.",
+ paper = "Dola16.pdf"
+}
+
+\end{chunk}
+
+\index{Dolan, Stephen}
+\index{Mycroft, Alan}
+\begin{chunk}{axiom.bib}
+@article{Dola17,
+ author = "Dolan, Stephen and Mycroft, Alan",
+ title = {{Polymorphism, Subtyping, and Type Inference in MLsub}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "52",
+ number = "1",
+ year = "2017",
+ pages = "60--72",
+ link = "\url{https://www.cl.cam.ac.uk/~sd601/papers/mlsubpreprint.pdf}",
+ abstract =
+ "We present a type system combining subtyping and MLstyle parametric
+ polymorphism. Unlike previous work, our system supports type inference
+ and has compact principal types. We demonstrate this system in the
+ minimal language MLsub, which types a strict superset of core ML
+ programs.
+
+ This is made possible by keeping a strict separation between the types
+ used to describe inputs and those used to describe outputs, and
+ extending the classical unification algorithm to handle subtyping
+ constraints between these input and output types. Principal types are
+ kept compact by type simplification, which exploits deep connections
+ between subtyping and the algebra of regular languages. An
+ implementation is available online.",
+ paper = "Dola17.pdf"
+}
+
+\end{chunk}
+
\begin{chunk}{axiom.bib}
@misc{Domi18,
author = "Domipheus",
@@ 12590,6 +12838,56 @@ when shown in factored form.
\end{chunk}
+\index{Downen, Paul}
+\index{Maurer, Luke}
+\index{Ariola, Zena M.}
+\begin{chunk}{axiom.bib}
+@inproceedings{Down16,
+ author = "Downen, Paul and Maurer, Luke and Ariola, Zena M.",
+ title = {{Sequent Calculus as a Compiler Intermediate Language}},
+ booktitle = "ICFP'16",
+ year = "2016",
+ pages = "74--88",
+ publisher = "ACM",
+ isbn = "978-1-4503-4219-3",
+ abstract =
+ "The $\lambda$calculus is popular as an intermediate language for
+ practical compilers. But in the world of logic it has a
+ lesserknown twin, born at the same time, called the {\sl sequent
+ calculus}. Perhaps that would make for a good intermediate
+ language, too? To explore this question we designed Sequent Core,
+ a practicallyoriented core calculus based on the sequent
+ calculus, and used it to reimplement a substantial chunk of the
+ Glasgow Haskell Compiler.",
+ paper = "Down16.pdf"
+}
+
+\end{chunk}
+
+\index{Duran, Antonio J.}
+\index{Perez, Mario}
+\index{Varona, Juan L.}
+\begin{chunk}{axiom.bib}
+@article{Dura14,
+ author = "Duran, Antonio J. and Perez, Mario and Varona, Juan L.",
+ title = {{The Misfortunes of a Trio of Mathematicians Using Computer
+ Algebra Systems. Can We Trust in Them?}},
+ journal = "Notices of the AMS",
+ volume = "61",
+ number = "10",
+ year = "2014",
+ link = "\url{www.ams.org/notices/201410/rnoti-p1249.pdf}",
+ pages = "1249--1252",
+ paper = "Dura14.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\subsection{E} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{F} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Filliatre, JeanChristophe}
\begin{chunk}{axiom.bib}
@article{Fill03,
@@ 12658,6 +12956,23 @@ when shown in factored form.
\end{chunk}
+\index{Flanagan, Cormac}
+\index{Sabry, Amr}
+\index{Duba, Bruce F.}
+\index{Felleisen, Matthias}
+\begin{chunk}{axiom.bib}
+@misc{Flan03,
+ author = "Flanagan, Cormac and Sabry, Amr and Duba, Bruce F. and
+ Felleisen, Matthias",
+ title = {{The Essence of Compiling with Continuations}},
+ link =
+ "\url{https://www.cs.rice.edu/~javaplt/411/17spring/Readings/essenceretro.pdf}",
+ paper = "Flan03.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Floyd, Robert W.}
\begin{chunk}{axiom.bib}
@article{Floy64,
@@ 12711,6 +13026,32 @@ when shown in factored form.
\end{chunk}
+\index{Francez, Nissim}
+\begin{chunk}{axiom.bib}
+@book{Fran92,
+ author = "Francez, Nissim",
+ title = {{Program Verification}},
+ year = "1992",
+ publisher = "Addison-Wesley",
+ isbn = "0201416085"
+}
+
+\end{chunk}
+
+\subsection{G} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Girard, JeanYves}
+\begin{chunk}{axiom.bib}
+@misc{Gira95,
+ author = "Girard, JeanYves",
+ title = {{Linear Logic: Its Syntax and Semantics}},
+ year = "1995",
+ paper = "Gira95.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Glasner, Ingrid}
\index{Loeckx, Jacquest}
\begin{chunk}{axiom.bib}
@@ 12872,6 +13213,56 @@ when shown in factored form.
\end{chunk}
+\subsection{H} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Hamming, Richard}
+\begin{chunk}{axiom.bib}
+@misc{Hamm95,
+ author = "Hamming, Richard",
+ title = {{You and Your Research}},
+ link = "\url{https://www.youtube.com/watch?v=a1zDuOPkMSw}",
+ year = "1995"
+}
+
+\end{chunk}
+
+\index{Harper, Robert}
+\index{Lillibridge, Mark}
+\begin{chunk}{axiom.bib}
+@inproceedings{Harp93b,
+ author = "Harper, Robert and Lillibridge, Mark",
+ title = {{Explicit Polymorphism and CPS Conversion}},
+ booktitle = "Symp. of Principles of Programming Languages",
+ publisher = "ACM Press",
+ year = "1993",
+ pages = "206--219",
+ abstract =
+ "We study the typing properties of CPS conversion for an extension
+ of F$\omega$ with control operators. Two classes of evaluation
+ strategies are considered, each with callbyname and callbyvalue
+ variants. Under the 'standard' strategies, constructor abstractions
+ are values, and constructor applications can lead to nontrivial
+ control effects. In contrast, the 'MLlike' strategies evaluate
+ beneath constructor abstractions, reflecting the usual interpretation
+ of programs in languages based on implicit polymorphism. Three
+ continuation passing style sublanguages are considered, one on which
+ the standard strategies coincide, one on which the MLlike
+ strategies coincide, and one on which all the strategies coincide.
+ Compositional, typepreserving CPS transformation algorithms are
+ given for the standard strategies, resulting in terms on which all
+ evaluation strategies coincide. This has as a corollary the
+ soundness and termination of welltyped programs under the standard
+ evaluation strategies. A similar result is obtained for the MLlike
+ callbyname strategy. In contrast, such results are obtained for
+ the callbyvalue MLlike strategy only for a restricted
+ sublanguage in which constructor abstractions are limited to
+ values.",
+ paper = "Harp93b.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Hoare, C.A.R}
\begin{chunk}{axiom.bib}
@incollection{Hoar72,
@@ 12885,6 +13276,89 @@ when shown in factored form.
\end{chunk}
+\index{Huet, G\'erard P.}
+\begin{chunk}{axiom.bib}
+@article{Huet75,
+ author = "Huet, Gerard P.",
+ title = {{A Unification Algorithm for typed $\lambda$Calculus}},
+ journal = "Theoretical Computer Science",
+ volume = "1",
+ number = "1",
+ pages = "25--57",
+ year = "1975",
+ abstract =
+ "A semidecision algorithm is presented, to search for unification of
+ formulas in typed $\omega$order $\lambda$calculus, and its
+ correctness is proved.
+
+ It is shown that the search space is significantly smaller than the
+ one for finding the most general unifiers. In particular, our search
+ is not redundant. This allows our algorithm to have good
+ directionality and convergence properties.",
+ paper = "Huet75.pdf"
+}
+
+\end{chunk}
+
+\index{Huet, G\'erard P.}
+\index{Lang, Bernard}
+\begin{chunk}{axiom.bib}
+@article{Huet78,
+ author = "Huet, Gerard P. and Lang, Bernard",
+ title = {{Proving and Applying Program Transformations Expressed
+ with SecondOrder Patterns}},
+ journal = "Acta Informatica",
+ volume = "11",
+ number = "1",
+ pages = "31--55",
+ year = "1978",
+ abstract =
+ "We propose a program transformation method based on rewritingrules
+ composed of secondorder schemas. A complete secondorder matching
+ algorithm is presented that allows effective use of these rules. We
+ show how to formally prove the correctness of the rules using a
+ denotational semantics for the programming language. We establish the
+ correctness of the transformation method itself, and give techniques
+ pertaining to its actual implementation. The paper is illustrated with
+ recursion removal examples."
+}
+
+\end{chunk}
+
+\index{Hughes, John}
+\begin{chunk}{axiom.bib}
+@inproceedings{Hugh90,
+ author = "Hughes, John",
+ title = {{Why Functional Programming Matters}},
+ booktitle = "Research Topics in Functional Programming",
+ publisher = "AddisonWesley",
+ year = "1990",
+ pages = "17--42",
+ abstract =
+ "As software becomes more and more complex, it is more and more
+ important to structure it well. Wellstructured software is easy to
+ write and to debug, and provides a collection of modules that can be
+ reused to reduce future programming costs. In this paper we show that
+ two features of functional languages in particular, higherorder
+ functions and lazy evaluation, can contribute significantly to
+ modularity. As examples, we manipulate lists and trees, program
+ several numerical algorithms, and implement the alphabeta heuristic
+ (an algorithm from Artificial Intelligence used in gameplaying
+ programs). We conclude that since modularity is the key to successful
+ programming, functional programming offers important advantages for
+ software development.",
+ paper = "Hugh90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\subsection{I} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{J} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{K} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Keimel, Klaus}
\index{Plotkin, Gordon D.}
\begin{chunk}{axiom.bib}
@@ 12921,6 +13395,36 @@ when shown in factored form.
\end{chunk}
+\index{Kranz, David}
+\index{Kelsey, Richard}
+\index{Rees, Jonathan}
+\index{Hudak, Paul}
+\index{Philbin, James}
+\index{Adams, Norman}
+\begin{chunk}{axiom.bib}
+@inproceedings{Kran86,
+ author = "Kranz, David and Kelsey, Richard and Rees, Jonathan and
+ Hudak, Paul and Philbin, James and Adams, Norman",
+ title = {{ORBIT: An Optimizing Compiler for Scheme}},
+ booktitle = "SIGPLAN '86",
+ publisher = "ACM",
+ pages = "219--233",
+ year = "1986",
+ abstract =
+ "In this paper we describe an optimizing compiler for Scheme
+ called {\sl Orbit} that incorporates our experience with an
+ earlier Scheme compiler called TC, together with some ideas from
+ Steele's Rabbit compiler. The three main design goals have been
+ correctness, generating very efficient compiled code, and
+ portability.",
+ paper = "Kran86.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\subsection{L} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Lamport, Leslie}
\index{Owicki, Susan}
\begin{chunk}{axiom.bib}
@@ 12937,6 +13441,33 @@ when shown in factored form.
\end{chunk}
+\index{Leroy, Xavier}
+\begin{chunk}{axiom.bib}
+@article{Lero09,
+ author = "Leroy, Xavier",
+ title = {{A Formally Verified Compiler Backend}},
+ journal = "Logic in Computer Science",
+ volume = "43",
+ number = "4",
+ pages = "363--446",
+ year = "2009",
+ abstract =
+ "This article describes the development and formal verification
+ (proof of semantic preservation) of a compiler backend from Cminor
+ (a simple imperative intermediate language) to PowerPC assembly code,
+ using the Coq proof assistant both for programming the compiler and
+ for proving its soundness. Such a verified compiler is useful in the
+ context of formal methods applied to the certification of critical
+ software: the verification of the compiler guarantees that the safety
+ properties proved on the source code hold for the executable compiled
+ code as well.",
+ paper = "Lero09.pdf"
+}
+
+\end{chunk}
+
+\subsection{M} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Manolios, Panagiotis}
\index{Moore, J Strother}
\begin{chunk}{axiom.bib}
@@ 12968,6 +13499,96 @@ when shown in factored form.
\end{chunk}
+\index{McDowell, Raymond}
+\index{Miller, Dale}
+\begin{chunk}{axiom.bib}
+@inproceedings{Mcdo97,
+ author = "McDowell, Raymond and Miller, Dale",
+ title = {{A Logic for Reasoning with HigherOrder Abstract Syntax}},
+ booktitle = "LICS'97",
+ publisher = "IEEE",
+ year = "1997",
+ link = "\url{http://www.lix.polytechnique.fr/~dale/papers/lics97.pdf}",
+ abstract =
+ "Logical frameworks based on intuitionistic or linear logics with
+ highertype quantification have been successfully used to give
+ highlevel, modular, and formal specifications of many important
+ judgments in the area of programming languages and inference
+ systems. Given such specifications, it is natural to consider proving
+ properties about the specified systems in the framework: for example,
+ given the specification of evaluation for a functional programming
+ language, prove that the language is deterministic or that the
+ subjectreduction theorem holds. One challenge in developing a
+ framework for such reasoning is that higherorder abstract syntax
+ (HOAS), an elegant and declarative treatment of objectlevel
+ abstraction and substitution, is difficult to treat in proofs
+ involving induction. In this paper, we present a metalogic that can
+ be used to reason about judgments coded using HOAS; this metalogic is
+ an extension of a simple intuitionistic logic that admits higherorder
+ quantification over simply typed $\lambda$terms (key ingredients for
+ HOAS) as well as induction and a notion of definition. The latter
+ concept of a definition is a prooftheoretic device that allows
+ certain theories to be treated as 'closed' or as defining fixed
+ points. The resulting metalogic can specify various logical
+ frameworks and a large range of judgments regarding programming
+ languages and inference systems. We illustrate this point through
+ examples, including the admissibility of cut for a simple logic and
+ subject reduction, determinacy of evaluation, and the equivalence of
+ SOS and natural semantics presentations of evaluation for a simple
+ functional programming language.",
+ paper = "Mcdo97.pdf"
+}
+
+\end{chunk}
+
+\index{McDowell, Raymond}
+\index{Miller, Dale}
+\begin{chunk}{axiom.bib}
+@article{Mcdo02,
+ author = "McDowell, Raymond and Miller, Dale",
+ title = {{Reasoning with HigherOrder Abstract Syntax in a Logical
+ Framework}},
+ journal = "ACM Trans. Computational Logic",
+ volume = "3",
+ year = "2002",
+ pages = "80--136",
+ link = "\url{http://www.lix.polytechnique.fr/~dale/papers/mcdowell01.pdf}",
+ abstract =
+ "Logical frameworks based on intuitionistic or linear logics with
+ highertype quantification have been successfully used to give
+ highlevel, modular, and formal specifications of many important
+ judgments in the area of programming languages and inference systems.
+ Given such specifications, it is natural to consider proving
+ properties about the specified systems in the framework: for example,
+ given the specification of evaluation for a functional programming
+ language, prove that the language is deterministic or that evaluation
+ preserves types. One challenge in developing a framework for such
+ reasoning is that higherorder abstract syntax (HOAS), an elegant and
+ declarative treatment of objectlevel abstraction and substitution, is
+ difficult to treat in proofs involving induction. In this paper, we
+ present a metalogic that can be used to reason about judgments coded
+ using HOAS; this metalogic is an extension of a simple intuitionistic
+ logic that admits higherorder quantification over simply typed
+ $\lambda$terms (key ingredients for HOAS) as well as induction and a
+ notion of definition. The latter concept of definition is a
+ prooftheoretic device that allows certain theories to be treated as
+ 'closed' or as defining fixed points. We explore the difficulties of
+ formal metatheoretic analysis of HOAS encodings by considering
+ encodings of intuitionistic and linear logics, and formally derive the
+ admissibility of cut for important subsets of these logics. We then
+ propose an approach to avoid the apparent tradeoff between the
+ benefits of higherorder abstract syntax and the ability to analyze
+ the resulting encodings. We illustrate this approach through examples
+ involving the simple functional and imperative programming languages
+ $PCF$ and $PCF_{:=}$. We formally derive such properties as unicity of
+ typing, subject reduction, determinacy of evaluation, and the
+ equivalence of transition semantics and natural semantics
+ presentations of evaluation.",
+ paper = "Mcdo02.pdf"
+}
+
+\end{chunk}
+
\index{Morgan, Carroll}
\begin{chunk}{axiom.bib}
@book{Morg98,
@@ 12982,6 +13603,43 @@ when shown in factored form.
\end{chunk}
+\index{Morrisett, Greg}
+\index{Walker, David}
+\index{Crary, Karl}
+\index{Glew, Neil}
+\begin{chunk}{axiom.bib}
+@article{Morr99,
+ author = "Morrisett, Greg and Walker, David and Crary, Karl and Glew, Neil",
+ title = {{From System F to Typed Assembly Language}},
+ journal = "ACM Trans. on Programming Languages and Systems (TOPLAS)",
+ volume = "21",
+ number = "3",
+ year = "1999",
+ pages = "527--568",
+ abstract =
+ "We motivate the design of typed assembly language (TAL) and present a
+ type-preserving translation from System F to TAL. The typed assembly
+ language we present is based on a conventional RISC assembly
+ language, but its static type system provides support for enforcing
+ high-level language abstractions, such as closures, tuples, and
+ user-defined abstract data types. The type system ensures that
+ well-typed programs cannot violate these abstractions. In addition,
+ the typing constructs admit many low-level compiler optimizations. Our
+ translation to TAL is specified as a sequence of type-preserving
+ transformations, including CPS and closure conversion phases;
+ type-correct source programs are mapped to type-correct assembly
+ language. A key contribution is an approach to polymorphic closure
+ conversion that is considerably simpler than previous work. The
+ compiler and typed assembly language provide a fully automatic way to
+ produce certified code, suitable for use in systems where untrusted
+ and potentially malicious code must be checked for safety before
+ execution.",
+ paper = "Morr99.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Myreen, Magnus O.}
\index{Davis, Jared}
\begin{chunk}{axiom.bib}
@@ 13152,6 +13810,10 @@ when shown in factored form.
\end{chunk}
+\subsection{N} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{O} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{O'Donnell, Michael J.}
\begin{chunk}{axiom.bib}
@article{Odon81,
@@ 13187,6 +13849,295 @@ when shown in factored form.
\end{chunk}
+\subsection{P} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Paulson, L.C.}
+\begin{chunk}{axiom.bib}
+@book{Paul96,
+ author = "Paulson, L.C.",
+ title = {{ML for the Working Programmer 2nd Edition}},
+ year = "1996",
+ publisher = "Cambridge University Press",
+ isbn = "052156543X"
+}
+
+\end{chunk}
+
+\index{PeytonJones, Simon}
+\index{Wadler, Philip}
+\begin{chunk}{axiom.bib}
+@inproceedings{Peyt93,
+ author = "PeytonJones, Simon and Wadler, Philip",
+ title = {{Imperative Functional Programming}},
+ booktitle = "Principles of Programming Languages POPL'93",
+ publisher = "ACM",
+ year = "1993",
+ pages = "71--84",
+ abstract =
+ "We present a new model, based on monads, for performing
+ input/output in a nonstrict, purely functional language. It is
+ composable, extensible, efficient, requires no extensions to the
+ type system, and extends smoothly to incorporate mixedlanguage
+ working and inplace array updates.",
+ paper = "Peyt93.pdf"
+}
+
+\end{chunk}
+
+\index{PeytonJones, Simon}
+\begin{chunk}{axiom.bib}
+@misc{Peyt17,
+ author = "PeytonJones, Simon",
+ title = {{Escape from the ivory tower: the Haskell journey}},
+ link = "\url{https://www.youtube.com/watch?v=re96UgMk6GQ}",
+ year = "2017"
+}
+
+\end{chunk}
+
+\index{Pfenning, Frank}
+\index{Elliott, Conal}
+\begin{chunk}{axiom.bib}
+@inproceedings{Pfen88a,
+ author = "Pfenning, Frank and Elliott, Conal",
+ title = {{HigherOrder Abstract Syntax}},
+ booktitle = "Symp on Language Design and Implementation PLDI'88",
+ publisher = "ACM",
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/pldi88.pdf}",
+ year = "1988",
+ abstract =
+ "We describe motivation, design, use, and implementation of
+ higherorder abstract syntax as a central representation for
+ programs, formulas, rules, and other syntactic objects in program
+ manipulation and other formal systems where matching and substitution
+ or unification are central operations. Higherorder abstract syntax
+ incorporates name binding information in a uniform and language
+ generic way. Thus it acts as a powerful link integrating diverse
+ tools in such formal environments. We have implemented higherorder
+ abstract syntax, a supporting matching and unification algorithm, and
+ some clients in Common Lisp in the framework of the Ergo project at
+ Carnegie Mellon University.",
+ paper = "Pfen88a.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Phelps, Tom}
+\begin{chunk}{axiom.bib}
+@misc{Phel92,
+ author = "Phelps, Tom",
+ title = {{A Common Lisp CGOL}},
+ year = "1992",
+ link = "\url{https://people.eecs.berkeley.edu/~fateman/cgol/cgol.1/cgol.ps}",
+ abstract =
+ "CGOL is an Algollike notation for Lisp. The original version,
+ written by Vaughan Pratt at M.I.T. is the early 70s was written in
+ Maclisp. This new version is based on Common Lisp. CGOL was translated
+ to Common Lisp in the following fourstage process:
+
+ (1) cgol.tok, the tokenizer has been almost completely rewritten; (2)
+ cgoll.l, the main translation loop with library of translation schemas
+ has been converted from Maclisp to Common Lisp; (3) the code that
+ cgol1.l produces has been converted to Common Lisp; (4) selected
+ examples of CGOL programs themselves were rewritten, since certain
+ aspects of the semantics of Maclisp would otherwise not be modelled in
+ the expected fashion. Maclisp differs from Common Lisp in a variety of
+ respects, and some of them are apparent from CGOL including direct
+ escapes to Lisp, variable scoping, function definitions and numerous
+ other aspects.
+
+ In contrast to the programming described above, the major contribution
+ of this paper is annotation of selected code from the CGOL
+ translator.",
+ paper = "Phel92.pdf"
+}
+
+\end{chunk}
+
+\index{Pollack, Robert}
+\begin{chunk}{axiom.bib}
+@phdthesis{Poll94,
+ author = "Pollack, Robert",
+ title = {{The Theory of LEGO  A Proof Checker for the Extended Calculus
+ of Constructions}},
+ school = "University of Edinburgh",
+ link =
+ "\url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.38.2610}",
+ year = "1994",
+ abstract =
+ "LEGO is a computer program for interactive typechecking in the
+ Extended Calculus of Constructions and two of its subsystems. LEGO
+ also supports the extension of these three systems with inductive
+ types. These type systems can be viewed as logics, and as meta
+ languages for expressing logics, and LEGO is intended to be used for
+ interactively constructing proofs in mathematical theories presented
+ in these logics. I have developed LEGO over six years, starting from
+ an implementation of the Calculus of Constructions by Gerard
+ Huet. LEGO has been used for problems at the limits of our abilities
+ to do formal mathematics.
+
+ In this thesis I explain some aspects of the metatheory of LEGO’s
+ type systems leading to a machinechecked proof that typechecking is
+ decidable for all three type theories supported by LEGO, and to a
+ verified algorithm for deciding their typing judgements, assuming only
+ that they are normalizing. In order to do this, the theory of Pure
+ Type Systems (PTS) is extended and formalized in LEGO. This extended
+ example of a formally developed body of mathematics is described, both
+ for its main theorems, and as a case study in formal mathematics. In
+ many examples, I compare formal definitions and theorems with their
+ informal counterparts, and with various alternative approaches, to
+ study the meaning and use of mathematical language, and suggest
+ clarifications in the informal usage.
+
+ Having outlined a formal development far too large to be surveyed in
+ detail by a human reader, I close with some thoughts on how the human
+ mathematician’s state of understanding and belief might be affected by
+ possessing such a thing.",
+ paper = "Poll94.pdf"
+}
+
+\end{chunk}
+
+\index{Pollack, Robert}
+\begin{chunk}{axiom.bib}
+@article{Poll94a,
+ author = "Pollack, Robert",
+ title = {{On Extensibility of Proof Checkers}},
+ journal = "LNCS",
+ volume = "996",
+ pages = "140--161",
+ year = "1994",
+ abstract =
+ "My suggestion is little different from LCF, just replacing one
+ computational meta language (ML) with another (ECC, FS0,...). The
+ philosophical point is that it is then possible to accept non
+ canonical proof notations as object level proofs, removing the need to
+ actually normalize them. There are problems to be worked out in
+ practice, such as extraction of programs from constructive proof, and
+ efficient execution of pure, total programs. Although this approach
+ doesn't address the difficulty of proving correctness of tactics in
+ the meta level, it is immediatly useful for tactics with structural
+ justification (e.g. weakening) which are not even representable in
+ LCF, and are infeasible in the Nuprl variant of LCF. Since it can be
+ used for any object system without adding new principles such as
+ reflection, and is compatible with other approaches to extensibility
+ (especially partial reflection), it should be considered as part of
+ the answer to extensibility in proof checkers.",
+ paper = "Poll94a.pdf"
+}
+
+\end{chunk}
+
+\subsection{Q} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{R} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{R\'emy, Didier}
+\begin{chunk}{axiom.bib}
+@misc{Remy17,
+ author = "Remy, Didier",
+ title = {{Type Systems for Programming Languages}},
+ year = "2017",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours1.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours2.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours3.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours4.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours5.pdf}",
+ paper = "Remy17.tgz"
+}
+
+\end{chunk}
+
+\subsection{S} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Sabry, Amr}
+\index{Felleisen, Matthias}
+\begin{chunk}{axiom.bib}
+@techreport{Sabr92,
+ author = "Sabry, Amr and Felleisen, Matthias",
+ title = {{Reasoning About Programs in ContinuationPassing Style}},
+ type = "technical report",
+ number = "TR 92-180",
+ institution = "Rice University",
+ abstract =
+ "Plotkin's $\lambda$value calculus is sound but incomplete for
+ reasoning about $\beta\nu$transformations on programs in continuation
+ passing style (CPS). To find a complete extension, we define a new,
+ compactifying CPS transformation and an 'inverse' mapping,
+ $un$CPS, both of which are interesting in their own right. Using the
+ new CPS transformation, we can determine the precise language of CPS
+ terms closed under $\beta\nu$ transformations. Using the $un$CPS
+ transformation, we can derive a set of axioms such that every equation
+ between source programs is provable if and only if $\beta\nu$ can
+ prove the corresponding equation between CPS programs. The extended
+ calculus is equivalent to an untyped variant of Moggi's computational
+ $\lambda$calculus.",
+ paper = "Sabr92.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Steele, Guy Lewis}
+\begin{chunk}{axiom.bib}
+@techreport{Stee78,
+ author = "Steele, Guy Lewis",
+ title = {{RABBIT: A Compiler for SCHEME}},
+ institution = "MIT",
+ type = "technical report",
+ number = "AITR-474",
+ year = "1978",
+ abstract =
+ "We have developed a compiler for the lexicallyscoped dialect of LISP
+ known as SCHEME. The compiler knows relatively little about specific
+ data manipulation primitives such as arithmetic operators, but
+ concentrates on general issues of environment and control. Rather than
+ having specialized knowledge about a large variety of control and
+ environment constructs, the compiler handles only a small basis set
+ which reflects the semantics of lambdacalculus. All of the
+ traditional imperative constructs, such as sequencing, assignment,
+ looping, GOTO, as well as many standard LISP constructs such as AND,
+ OR, and COND, are expressed in macros in terms of the applicative
+ basis set. A small number of optimization techniques, coupled with the
+ treatment of function calls as GOTO statements, serve to produce code
+ as good as that produced by more traditional compilers. The macro
+ approach enables speedy implementation of new constructs as desired
+ without sacrificing efficiency in the generated code.
+
+ A fair amount of analysis is devoted to determining whether
+ environments may be stackallocated or must be heapallocated.
+ Heapallocated environments are necessary in general because SCHEME
+ (unlike Algol 60 and Algol 68, for example) allows procedures with
+ free lexically scoped variables to be returned as the values of other
+ procedures; the Algol stackallocation environment strategy does not
+ suffice. The methods used here indicate that a heapallocating
+ generalization of the 'display' technique leads to an efficient
+ implementation of such 'upward funargs'. Moreover, compiletime
+ optimization and analysis can eliminate many 'funargs' entirely, and
+ so far fewer environment structures need be allocated at run time than
+ might be expected.
+
+ A subset of SCHEME (rather than triples, for example) serves as the
+ representation intermediate between the optimized SCHEME code and the
+ final output code; code is expressed in this subset in the socalled
+ continuationpassing style. As a subset of SCHEME, it enjoys the same
+ theoretical properties; one could even apply the same optimizer used
+ on the input code to the intermediate code. However, the subset is so
+ chosen that all temporary quantities are made manifest as variables,
+ and no control stack is needed to evaluate it. As a result, this
+ apparently applicative representation admits an imperative
+ interpretation which permits easy transcription to final imperative
+ machine code. These qualities suggest that an applicative language
+ like SCHEME is a better candidate for an UNCOL than the more
+ imperative candidates proposed to date.",
+ paper = "Stee78.pdf"
+}
+
+\end{chunk}
+
+\subsection{T} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
\index{Talpin, JeanPierre}
\index{Jouvelot, Pierre}
\begin{chunk}{axiom.bib}
@@ 13220,27 +14171,180 @@ when shown in factored form.
\end{chunk}
\index{Leroy, Xavier}
+\index{Tarditi, David}
+\index{Acharya, Anurag}
+\index{Lee, Peter}
\begin{chunk}{axiom.bib}
@article{Lero09,
 author = "Leroy, Xavier",
 title = {{A Formally Verified Compiler Backend}},
 journal = "Logic in Computer Science",
 volume = "43",
 number = "4",
 pages = "363446",
 year = "2009",
+@techreport{Tard90,
+ author = "Tarditi, David and Acharya, Anurag and Lee, Peter",
+ title = {{No Assembly Required: Compiling Standard ML to C}},
+ type = "technical report",
+ institution = "Carnegie Mellon University",
+ number = "CMUCS90187",
+ year = "1990",
abstract =
 "This article describes the development and formal verification
 (proof of semantic preservation) of a compiler backend from Cminor
 (a simple imperative intermediate language) to PowerPC assembly code,
 using the Coq proof assistant both for programming the compiler and
 for proving its soundness. Such a verified compiler is useful in the
 context of formal methods applied to the certification of critical
 software: the verification of the compiler guarantees that the safety
 properties proved on the source code hold for the executable compiled
 code as well.",
 paper = "Lero09.pdf"
+ "C has been proposed as a portable target language for
+ implementing higherorder languages. Previous efforts at compiling
+ such languages to C have produced efficient code, but have had to
+ compromise on either the portability or the preservation of the
+ tailrecursive properties of the languages. We assert that neither
+ of these compromises is necessary for the generation of efficient
+ code. We offer a Standard ML to C compiler, which does not make
+ either of these compromises, as an existence proof. The generated
+ code achieves an execution speed that is just a factor of two
+ slower than the best native code compiler. In this paper, we
+ describe the design, implementation and the performance of this
+ compiler.",
+ paper = "Tard90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Tuhola, Henri}
+\begin{chunk}{axiom.bib}
+@misc{Tuho18,
+ author = "Tuhola, Henri",
+ title = {{An another MLsub study}},
+ link = "\url{http://boxbase.org/entries/2018/mar/12/mlsubstudy/}",
+ year = "2018",
+ comment = "Subtyping does not solve the problem of coercion"
+}
+
+\end{chunk}
+
+\index{Tuhola, Henri}
+\begin{chunk}{axiom.bib}
+@misc{Tuho18a,
+ author = "Tuhola, Henri",
+ title = {{Boxbase  Index}},
+ link = "\url{http://boxbase.org/}",
+ year = "2018",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\subsection{U} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{V} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{W} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Wadler, Philip}
+\begin{chunk}{axiom.bib}
+@article{Wadl92,
+ author = "Wadler, Philip",
+ title = {{Comprehending Monads}},
+ journal = "Mathematical Structures in Computer Science",
+ volume = "2",
+ pages = "461493",
+ year = "1992",
+ abstract =
+ "Category theorists invented {\sl monads} in the 1960s to
+ concisely express certain aspects of universal algebra. Functional
+ programmers invented {\sl list comprehensions} in the 1970s to
+ concisely express certain programs involving lists. This paper
+ shows how list comprehensions may be generalised to an arbitrary
+ monad, and how the resulting programming feature can concisely
+ express in a pure functional language some programs that
+ manipulate state, handle exceptions, parse text, or invoke
+ continuations. A new solution to the old problem of destructive
+ array update is also presented. No knowledge of category theory is
+ assumed.",
+ paper = "Wadl92.pdf"
+}
+
+\end{chunk}
+
+\index{Wadler, Philip}
+\begin{chunk}{axiom.bib}
+@article{Wadl07,
+ author = "Wadler, Philip",
+ title = {{The GirardReynolds isomorphism (second edition)}},
+ journal = "Theoretical Computer Science",
+ volume = "375",
+ pages = "201226",
+ year = "2007",
+ abstract =
+ "JeanYves Girard and John Reynolds independently discovered the
+ secondorder polymorphic lambda calculus, F2. Girard additionally
+ proved a Representation Theorem : every function on natural numbers
+ that can be proved total in secondorder intuitionistic predicate
+ logic, P2, can be represented in F2. Reynolds additionally proved an
+ Abstraction Theorem : every term in F2 satisfies a suitable notion of
+ logical relation; and formulated a notion of parametricity satisfied
+ by wellbehaved models.
+
+ We observe that the essence of Girard’s result is a projection from P2
+ into F2, and that the essence of Reynolds’s result is an embedding of
+ F2 into P2, and that the Reynolds embedding followed by the Girard
+ projection is the identity. We show that the inductive naturals are
+ exactly those values of type natural that satisfy Reynolds’s notion of
+ parametricity, and as a consequence characterize situations in which
+ the Girard projection followed by the Reynolds embedding is also the
+ identity.
+
+ An earlier version of this paper used a logic over untyped terms. This
+ version uses a logic over typed term, similar to ones considered by
+ Abadi and Plotkin and by Takeuti, which better clarifies the
+ relationship between F2 and P2.",
+ paper = "Wadl07.pdf"
+}
+
+\end{chunk}
+
+\subsection{X} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Y} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\subsection{Z} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\index{Zeilberger, Noam}
+\begin{chunk}{axiom.bib}
+@phdthesis{Zeil09,
+ author = "Zeilberger, Noam",
+ title = {{The Logical Basis of Evaluation Order and PatternMatching}},
+ school = "Carnegie Mellon University",
+ year = "2009",
+ link = "\url{https://www.cs.cmu.edu/~rwh/theses/zeilberger.pdf}",
+ abstract =
+ "An old and celebrated analogy says that writing programs is like
+ proving theorems. This analogy has been productive in both
+ directions, but in particular has demonstrated remarkable utility in
+ driving progress in programming languages, for example leading towards
+ a better understanding of concepts such as abstract data types and
+ polymorphism. One of the best known instances of the analogy actually
+ rises to the level of an isomorphism: between Gentzen’s natural
+ deduction and Church’s lambda calculus. However, as has been
+ recognized for a while, lambda calculus fails to capture some of the
+ important features of modern programming languages. Notably, it does
+ not have an inherent notion of evaluation order, needed to make sense
+ of programs with side effects. Instead, the historical descendents of
+ lambda calculus (languages like Lisp, ML, Haskell, etc.) impose
+ evaluation order in an ad hoc way.
+
+ This thesis aims to give a fresh take on the proofsasprograms
+ analogy—one which better accounts for features of modern programming
+ languages—by starting from a different logical foundation. Inspired
+ by Andreoli’s focusing proofs for linear logic, we explain how to
+ axiomatize certain canonical forms of logical reasoning through a
+ notion of pattern. Propositions come with an intrinsic polarity,
+ based on whether they are defined by patterns of proof, or by patterns
+ of refutation. Applying the analogy, we then obtain a programming
+ language with builtin support for patternmatching, in which
+ evaluation order is explicitly reflected at the level of types—and
+ hence can be controlled locally, rather than being an ad hoc, global
+ policy decision. As we show, different forms of continuationpassing
+ style (one of the historical tools for analyzing evaluation order)
+ can be described in terms of different polarizations. This language
+ provides an elegant, uniform account of both untyped and
+ intrinsicallytyped computation (incorporating ideas from infinitary
+ proof theory), and additionally, can be provided an extrinsic type
+ system to express and statically enforce more refined properties of
+ programs. We conclude by using this framework to explore the theory of
+ typing and subtyping for intersection and union types in the presence
+ of effects, giving a simplified explanation of some of the unusual
+ artifacts of existing systems.",
+ paper = "Zeil09.pdf"
}
\end{chunk}
@@ 26383,7 +27487,7 @@ Proc ISSAC 97 pp172175 (1997)
\index{DeRhamComplex}
\index{Flanders, Harley}
\begin{chunk}{axiom.bib}
 @book{Flan03,
+@book{Flan03a,
author = "Flanders, Harley",
title = {{Differential Forms with Applications to the Physical Sciences}},
year = "2003",
@@ 26832,6 +27936,31 @@ Proc ISSAC 97 pp172175 (1997)
\end{chunk}
+\index{Baker, Henry G.}
+\begin{chunk}{axiom.bib}
+@article{Bake91,
+ author = "Baker, Henry G.",
+ title = {{Pragmatic Parsing in Common Lisp}},
+ journal = "ACM Lisp Pointers",
+ volume = "IV",
+ number = "2",
+ pages = "315",
+ year = "1991",
+ abstract =
+ "We review META, a classic technique for building recursive descent
+ parsers, that is both simple and efficient. While META does not
+ handle all possible regular or contextfree grammars, it handles a
+ surprisingly large fraction of the grammars encountered by Lisp
+ programmers. We show how META can be used to parse streams, strings
+ and lists—including Common Lisp's hairy lambda expression parameter
+ lists. Finally, we compare the execution time of this parsing method
+ to the builtin methods of Common Lisp.",
+ paper = "Bake91.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
\index{Basu, Saugata}
\index{Pollack, Richard}
\index{Roy, MarieFrancoise}
@@ 57915,7 +59044,7 @@ Mathematical Programming: The State of the Art.
year = "1973",
link = "\url{http://hall.org.ua/halls/wizzard/pdf/Vaughan.Pratt.TDOP.pdf}",
paper = "Prat73.pdf",
 keywords = "axiomref"
+ keywords = "axiomref, printed"
}
\end{chunk}
diff git a/changelog b/changelog
index 4d7c750..33edb40 100644
 a/changelog
+++ b/changelog
@@ 1,3 +1,5 @@
+20180415 tpd src/axiomwebsite/patches.html 20180415.01.tpd.patch
+20180415 tpd books/bookvolbib add references
20180401 tpd src/axiomwebsite/patches.html 20180401.01.tpd.patch
20180401 tpd books/bookvolbib add references
20180309 tpd src/axiomwebsite/patches.html 20180309.03.tpd.patch
diff git a/patch b/patch
index b4f34a6..fda4020 100644
 a/patch
+++ b/patch
@@ 2,673 +2,1001 @@ books/bookvolbib add references
Goal: Proving Axiom Correct
\index{Benton, Nick}
\index{Zarfaty, Uri}
\begin{chunk}{axiom.bib}
@inproceedings{Bent07,
 author = "Benton, Nick and Zarfaty, Uri",
 title = {{Formalizing and Verifying Semantic Type Soundness of a
 Simple Compiler}},
 booktitle = "Principles and Practice of Declarative Programming",
 publisher = "ACM",
 pages = "112",
 year = "2007",
 abstract =
 "We describe a semantic type soundness result, formalized in the Coq
 proof assistant, for a compiler from a simple imperative language with
 heapallocated data into an idealized assembly language. Types in the
 highlevel language are interpreted as binary relations, built using
 both secondorder quantification and a form of separation structure,
 over stores and code pointers in the lowlevel machine.",
 paper = "Bent07.pdf",
 keywords = "printed"
+\index{Francez, Nissim}
+\begin{chunk}{axiom.bib}
+@book{Fran92,
+ author = "Francez, Nissim",
+ title = {{Program Verification}},
+ year = "1992",
+ publisher = "AddisonWesley",
+ isbn = "0201416085"
}
\end{chunk}
\index{Chlipala, Adam J.}
+\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@article{Chli07,
 author = "Chlipala, Adam J.",
 title = {{A Certified TypePreserving Compiler from the Lambda Calculus
 to Assembly Language}},
 journal = "ACM SIGPLAN PLDI'07",
 volume = "42",
 number = "6",
 year = "2007",
 pages = "5465",
 abstract =
 "We present a certified compiler from the simplytyped lambda calculus
 to assembly language. The compiler is certified in the sense that it
 comes with a machinechecked proof of semantics preservation,
 performed with the Coq proof assistant. The compiler and the terms of
 its several intermediate languages are given dependent types that
 guarantee that only welltyped programs are representable. Thus, type
 preservation for each compiler pass follows without any significant
 "proofs" of the usual kind. Semantics preservation is proved based on
 denotational semantics assigned to the intermediate languages. We
 demonstrate how working with a typepreserving compiler enables
 typedirected proof search to discharge large parts of our proof
 obligations automatically.",
 paper = "Chli07.pdf"
+@book{Appe92,
+ author = "Appel, Andrew W.",
+ title = {{Compiling with Continuations}},
+ year = "1992",
+ publisher = "Cambridge University Press",
+ isbn = "052103311X"
}
\end{chunk}
\index{Crary, Karl}
\index{Sarkar, Susmit}
+\index{Appel, Andrew W.}
\begin{chunk}{axiom.bib}
@misc{Crar08,
 author = "Crary, Karl and Sarkar, Susmit",
 title = {{Foundational Certified Code in a Metalogical Framework}},
 link = "\url{http://repository.cmu.edu/compsci/470/}",
 year = "2008",
 abstract =
 "Foundational certified code systems seek to prove untrusted programs
 to be safe relative to safety policies given in terms of actual
 machine architectures, thereby improving the systems' flexibility and
 extensibility. Using the Twelf metalogical framework, we have
 constructed a safety policy for the IA32 architecture with a trusted
 runtime library. The safety policy is based on a formalized
 operational semantics. We have also developed a complete, foundational
 proof that a fully expressive typed assembly language satisfies that
 safety policy",
 paper = "Crar08.pdf"
+@book{Appe98,
+ author = "Appel, Andrew W.",
+ title = {{Modern Compiler Implementation in ML}},
+ year = "1998",
+ publisher = "Cambridge University Press",
+ isbn = "0521607647"
+}
+
+\end{chunk}
+
+\index{Paulson, L.C.}
+\begin{chunk}{axiom.bib}
+@article{Paul96,
+ author = "Paulson, L.C.",
+ title = {{ML for the Working Programmer 2nd Edition}},
+ year = "1996",
+ publisher = "Cambridge University Press",
+ isbn = "052156543X"
+}
+
+\end{chunk}
+
+\index{Pollack, Robert}
+\begin{chunk}{axiom.bib}
+@phdthesis{Poll94,
+ author = "Pollack, Robert",
+ title = {{The Theory of LEGO  A Proof Checker for the Extended Calculus
+ of Constructions}},
+ school = "University of Edinburgh",
+ link =
+ "\url{http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.38.2610}",
+ year = "1994",
+ abstract =
+ "LEGO is a computer program for interactive typechecking in the
+ Extended Calculus of Constructions and two of its subsystems. LEGO
+ also supports the extension of these three systems with inductive
+ types. These type systems can be viewed as logics, and as meta
+ languages for expressing logics, and LEGO is intended to be used for
+ interactively constructing proofs in mathematical theories presented
+ in these logics. I have developed LEGO over six years, starting from
+ an implementation of the Calculus of Constructions by Gerard
+ Huet. LEGO has been used for problems at the limits of our abilities
+ to do formal mathematics.
+
+ In this thesis I explain some aspects of the metatheory of LEGO’s
+ type systems leading to a machinechecked proof that typechecking is
+ decidable for all three type theories supported by LEGO, and to a
+ verified algorithm for deciding their typing judgements, assuming only
+ that they are normalizing. In order to do this, the theory of Pure
+ Type Systems (PTS) is extended and formalized in LEGO. This extended
+ example of a formally developed body of mathematics is described, both
+ for its main theorems, and as a case study in formal mathematics. In
+ many examples, I compare formal definitions and theorems with their
+ informal counterparts, and with various alternative approaches, to
+ study the meaning and use of mathematical language, and suggest
+ clarifications in the informal usage.
+
+ Having outlined a formal development far too large to be surveyed in
+ detail by a human reader, I close with some thoughts on how the human
+ mathematician’s state of understanding and belief might be affected by
+ possessing such a thing.",
+ paper = "Poll94.pdf"
}
\end{chunk}
\index{Fox, Anthony}
+\index{Pollack, Robert}
\begin{chunk}{axiom.bib}
@article{Foxx03,
 author = "Fox, Anthony",
 title = {{Formal Specification and Verification of ARM6}}
+@article{Poll94a,
+ author = "Pollack, Robert",
+ title = {{On Extensibility of Proof Checkers}},
journal = "LNCS",
 volume = "2758",
 pages = "2540",
 year = "2003",
 abstract =
 "This paper gives an overview of progress made on the formal
 specification and verification of the ARM6 microarchitecture using
 the HOL proof system. The ARM6 is a commercial processor design preva
 lent in mobile and embedded systems – it features a 3stage pipeline
 with a multicycle execute stage, six operating modes and a rich
 32bit RISC instruction set. This paper describes some of the
 difficulties encountered when working with a full blown instruction
 set architecture that has not been designed with verification in mind.",
 paper = "Foxx03.pdf"
+ volume = "996",
+ pages = "140161",
+ year = "1994",
+ abstract =
+ "My suggestion is little different from LCF, just replacing one
+ computational meta language (ML) with another (ECC, FS0,...). The
+ philosophical point is that it is then possible to accept non
+ canonical proof notations as object level proofs, removing the need to
+ actually normalize them. There are problems to be worked out in
+ practice, such as extraction of programs from constructive proof, and
+ efficient execution of pure, total programs. Although this approach
+ doesn't address the difficulty of proving correctness of tactics in
+ the meta level, it is immediatly useful for tactics with structural
+ justification (e.g. weakening) which are not even representable in
+ LCF, and are infeasible in the Nuprl variant of LCF. Since it can be
+ used for any object system without adding new principles such as
+ reflection, and is compatible with other approaches to extensibility
+ (especially partial reflection), it should be considered as part of
+ the answer to extensibility in proof checkers.",
+ paper = "Poll94a.pdf"
}
\end{chunk}
\index{Guttman, Joshua D.}
\index{Ramsdell, John D.}
\index{Wand, Mitchell}
+\index{Pfenning, Frank}
+\index{Elliott, Conal}
\begin{chunk}{axiom.bib}
@article{Gutt95,
 author = "Guttman, Joshua D. and Ramsdell, John D. and Wand, Mitchell",
 title = {{VLISP: A Verified Implementation of Scheme}},
 journal = "Lisp and Symbolic Computation",
 volume = "8",
 pages = "532",
 year = "1995",
+@inproceedings{Pfen88a,
+ author = "Pfenning, Frank and Elliott, Conal",
+ title = {{HigherOrder Abstract Syntax}},
+ booktitle = "Symp on Language Design and Implementation PLDI'88",
+ publisher = "ACM",
+ link = "\url{https://www.cs.cmu.edu/~fp/papers/pldi88.pdf}",
+ year = "1988",
+ abstract =
+ "We describe motivation, design, use, and implementation of
+ higherorder abstract syntax as a central representation for
+ programs, formulas, rules, and other syntactic objects in program
+ manipulation and other formal systems where matching and substitution
+ or unification are central operations. Higherorder abstract syntax
+ incorporates name binding information in a uniform and language
+ generic way. Thus it acts as a powerful link integrating diverse
+ tools in such formal environments. We have implemented higherorder
+ abstract syntax, a supporting matching and unification algorithm, and
+ some clients in Common Lisp in the framework of the Ergo project at
+ Carnegie Mellon University.",
+ paper = "Pfen88a.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{McDowell, Raymond}
+\index{Miller, Dale}
+\begin{chunk}{axiom.bib}
+@article{Mcdo02,
+ author = "McDowell, Raymond and Miller, Dale",
+ title = {{Reasoning with HigherOrder Abstract Syntax in a Logical
+ Framework}},
+ journal = "ACM Trans. Computational Logic",
+ volume = "3",
+ year = "2002",
+ pages = "80136",
+ link = "\url{http://www.lix.polytechnique.fr/~dale/papers/mcdowell01.pdf}",
abstract =
 "The VL!SP project showed how to produce a comprehensively verified
 implementation for a programming language, namely Scheme. This paper
 introduces two more detailed studies on VLISP [13, 21). It summarizes
 the basic techniques that were used repeatedly throughout the effort.
 It presents scientific conclusions about the applicability of the
 these techniques as well as engineering conclusions about the crucial
 choices that allowed the verification to succeed.",
 paper = "Gutt95.pdf"
+ "Logical frameworks based on intuitionistic or linear logics with
+ highertype quantification have been successfully used to give
+ highlevel, modular, and formal specifications of many important
+ judgments in the area of programming languages and inference systems.
+ Given such specifications, it is natural to consider proving
+ properties about the specified systems in the framework: for example,
+ given the specification of evaluation for a functional programming
+ language, prove that the language is deterministic or that evaluation
+ preserves types. One challenge in developing a framework for such
+ reasoning is that higherorder abstract syntax (HOAS), an elegant and
+ declarative treatment of objectlevel abstraction and substitution, is
+ difficult to treat in proofs involving induction. In this paper, we
+ present a metalogic that can be used to reason about judgments coded
+ using HOAS; this metalogic is an extension of a simple intuitionistic
+ logic that admits higherorder quantification over simply typed
+ $\lambda$terms (key ingredients for HOAS) as well as induction and a
+ notion of definition. The latter concept of definition is a
+ prooftheoretic device that allows certain theories to be treated as
+ 'closed' or as defining fixed points. We explore the difficulties of
+ formal metatheoretic analysis of HOAS encodings by considering
+ encodings of intuitionistic and linear logics, and formally derive the
+ admissibility of cut for important subsets of these logics. We then
+ propose an approach to avoid the apparent tradeoff between the
+ benefits of higherorder abstract syntax and the ability to analyze
+ the resulting encodings. We illustrate this approach through examples
+ involving the simple functional and imperative programming languages
+ $PCF$ and $PCF_{:=}$. We formally derive such properties as unicity of
+ typing, subject reduction, determinacy of evaluation, and the
+ equivalence of transition semantics and natural semantics
+ presentations of evaluation.",
+ paper = "Mcdo02.pdf"
}
\end{chunk}
\index{Leroy, Xavier}
+\index{Barras, Bruno}
\begin{chunk}{axiom.bib}
@article{Lero09,
 author = "Leroy, Xavier",
 title = {{A Formally Verified Compiler Backend}},
 journal = "Logic in Computer Science",
 volume = "43",
 number = "4",
 pages = "363446",
 year = "2009",
 abstract =
 "This article describes the development and formal verificat ion
 (proof of semantic preservation) of a compiler backend from Cminor
 (a simple imperative intermediate language) to PowerPC assembly code,
 using the Coq proof assistant both for programming the compiler and
 for proving its soundness. Such a verified compiler is useful in the
 context of formal methods applied to the certification of critical
 software: the verification of the compiler guarantees that the safety
 properties proved on the source code hold for the executable compiled
 code as well.",
 paper = "Lero09.pdf"
+@techreport{Barr96a,
+ author = "Barras, Bruno",
+ title = {{Coq en Coq}},
+ institution = "INRIA",
+ year = "1996",
+ type = "Research Report",
+ number = "inria00073667",
+ comment = "French",
+ abstract =
+ "The essential step of the formal verification of a proofchecker such
+ as Coq is the verification of its kernel: a typechecker for the
+ {\sl Calculus of Inductive Constructions} (CIC) which is its underlying
+ formalism. The present work is a first smallscale attempt on a
+ significative fragment of CIC: the Calculus of Constructions (CC). We
+ formalize the definition and the metatheory of (CC) in Coq. In
+ particular, we prove strong normalization and decidability of type
+ inference. From the latter proof, we extract a certified {\sl Caml
+ Light} program, which performs type inference (or typechecking) for
+ an arbitrary typing judgement in CC. Integrating this program in a
+ larger system, including a parser and prettyprinter, we obtain a
+ standalone proofchecker, called {\sl CoC}, the {\sl Calculus of
+ Constructions}. As an example, the formal proof of Newman's lemma,
+ build with Coq, can be reverified by {\sl CoC} with reasonable
+ performance.",
+ paper = "Barr96a.pdf"
}
\end{chunk}
\index{Dargaye, Zaynah}
\index{Leroy, Xavier}
+\index{Barras, Bruno}
+\index{Werner, Benjamin}
\begin{chunk}{axiom.bib}
@article{Darg07,
 author = "Dargaye, Zaynah and Leroy, Xavier",
 title = {{Mechanized Verification of CPS Transformations}},
 journal = "LNCS",
 volume = "4790",
 pages = "211225",
 year = "2007",
+@misc{Barr18,
+ author = "Barras, Bruno and Werner, Benjamin",
+ title = {{Coq in Coq}},
+ link =
+"\url{http://www.lix.polytechnique.fr/Labo/Bruno.Barras/publi/coqincoq.pdf}",
+ comment = "https://github.com/coqcontribs/coqincoq",
abstract =
 "Transformation to continuationpassing style (CPS) is often performed
 by optimizing compilers for functional programming languages. As part
 of the development and proof of correctness of a compiler for the
 miniML functional language, we have mechanically verified the
 correctness of two CPS transformations for a callbyvalue λ
 $\lambda$calculus with nary functions, recursive functions, data
 types and patternmatching. The transformations generalize Plotkin’s
 original callbyvalue transformation and Danvy and Nielsen’s
 optimized transformation, respectively. We used the Coq proof
 assistant to formalize the transformations and conduct and check the
 proofs. Originalities of this work include the use of bigstep
 operational semantics to avoid difficulties with administrative
 redexes, and of twosorted de Bruijn indices to avoid difficulties
 with $\alpha$conversion.",
 paper = "Darg07.pdf"
}

\end{chunk}

\index{Manolios, Panagiotis}
\index{Moore, J Strother}
\begin{chunk}{axiom.bib}
@article{Mano03,
 author = "Manolios, Panagiotis and Moore, J Strother",
 title = {{Partial Functions in ACL2}},
 journal = "J. of Automated Reasoning",
 volume = "31",
 pages = "107127",
 year = "2003",
 abstract =
 "We describe a method for introducing 'partial functions' into ACL2,
 that is, functions not defined everywhere. The function 'definitions'
 are actually admitted via the encapsulation principle: the new
 function symbol is constrained to satisfy the appropriate
 equation. This is permitted only when a witness function can be
 exhibited, establishing that the constraint is satisfiable. Of
 particular interest is the observation that every tail recursive
 definition can be witnessed in ACL2. We describe a macro that allows
 the convenient introduction of arbitrary tail recursive functions, and
 we discuss how such functions can be used to prove theorems about
 state machine models without reasoning about “clocks” or counting the
 number of steps until termination. Our macro for introducing “partial
 functions” also permits a variety of other recursive schemes, and we
 briefly illustrate some of them.",
 paper = "Mano03.pdf",
+ "We formalize the definition and the metatheory of the Calculus of
+ Constructions (CC) using the proof assistant Coq. In particular,
+ we prove strong normalization and decidability of type
+ inference. From the latter proof, we extract a certified Objective
+ Caml program which performs type inference in CC and use this code
+ to build a smallscale certified proofchecker.",
+ paper = "Barr18.pdf",
keywords = "printed"
}
\end{chunk}
\index{Boyer, Robert S.}
\index{Moore, J Strother}
+\index{Huet, G\'erard P.}
\begin{chunk}{axiom.bib}
@article{Boye75,
 author = "Boyer, Robert S. and Moore, J Strother",
 title = {{Proving Theorems About LISP Functions}},
 journal = "J. ACM",
 volume = "22",
+@article{Huet75,
+ author = "Huet, Gerard P.",
+ title = {{A Unification Algorithm for typed $\lambda$Calculus}},
+ journal = "Theoretical Computer Science",
+ volume = "1",
number = "1",
 pages = "129144",
+ pages = "2557",
year = "1975",
+ abstract =
+ "A semidecision algorithm is presented, to search for unification of
+ formulas in typed $\omega$order $\lambda$calculus, and its
+ correctness is proved.
+
+ It is shown that the search space is significantly smaller than the
+ one for finding the most general unifiers. In particular, our search
+ is not redundant. This allows our algorithm to have good
+ directionality and convergence properties.",
+ paper = "Huet75.pdf"
+}
+
+\end{chunk}
+
+\index{Huet, G\'erard P.}
+\index{Lang, Bernard}
+\begin{chunk}{axiom.bib}
+@article{Huet78,
+ author = "Huet, Gerard P. and Lang, Bernard",
+ title = {{Proving and Applying Program Transformations Expressed
+ with SecondOrder Patterns}},
+ journal = "Acta Informatica",
+ volume = "11",
+ number = "1",
+ pages = "3155",
+ year = "1978",
abstract =
 "Program verification is the idea that properties of programs can be
 precisely stated and proved in the mathematical sense. In this paper,
 some simple heuristics combining evaluation and mathematical induction
 are describe, which the authors have implemented in a program that
 automatically proves a wide variety of theorems about recursive LISP
 functions. The method the program uses a generate induction formulas
 is described at length. The theorems proved by the program include
 that REVERSE is its own inverse and that a particular SORT program is
 correct. A list of theorems proved by the program is given.",
 paper = "Boye75.pdf"
+ "We propose a program transformation method based on rewritingrules
+ composed of secondorder schemas. A complete secondorder matching
+ algorithm is presented that allows effective use of these rules. We
+ show how to formally prove the correctness of the rules using a
+ denotational semantics for the programming language. We establish the
+ correctness of the transformation method itself, and give techniques
+ pertaining to its actual implementation. The paper is illustrated with
+ recursion removal examples."
}
\end{chunk}
\index{Gordon, Michael J.C.}
+\index{Hamming, Richard}
\begin{chunk}{axiom.bib}
@inproceedings{Gord89,
 author = "Gordon, Michael J.C.",
 title = {{Mechanizing Programming Logics in Higher Order Logic}},
 booktitle = "Current Trends in Hardware Verification and Automated
 Theorem Proving",
 publisher = "Springer",
 year = "1989",
+@misc{Hamm95,
+ author = "Hamming, Richard",
+ title = {{Hamming, 'You and Your Research'}},
+ link = "\url{https://www.youtube.com/watch?v=a1zDuOPkMSw}",
+ year = "1995"
+}
+
+\end{chunk}
+
+\index{Dolan, Stephen}
+\begin{chunk}{axiom.bib}
+@phdthesis{Dola16,
+ author = "Dolan, Stephen",
+ title = {{Algebraic Subtyping}},
+ school = "University of Cambridge",
+ year = "2016",
+ link = "\url{https://www.cl.cam.ac.uk/~sd601/thesis.pdf}",
abstract =
 "Formal reasoning about computer programs can be based directly on the
 seman tics of the programming language, or done in a special purpose
 logic like Hoare logic. The advantage of the first approach is that
 it guarantees that the formal reasoning applies to the language being
 used (it is well known, for example, that Hoare's assignment axiom
 fails to hold for most programming languages). The advantage of the
 second approach is that the proofs can be more direct and natural.
+ "Type inference gives programmers the benefit of static,
+ compiletime type checking without the cost of manually specifying
+ types, and has long been a standard feature of functional programming
+ languages. However, it has proven difficult to integrate type
+ inference with subtyping, since the unification engine at the core of
+ classical type inference accepts only equations, not subtyping
+ constraints.
 In this paper, an attempt to get the advantages of both approaches
 is described. The rules of Hoare logic are mechanically derived
 from the semantics of a simple imperative programming language
 (using the HOL system). These rules form the basis for a simple
 program verifier in which verification conditions are generated by LCF
 style tactics whose validations use the derived Hoare rules.
 Because Hoare logic is derived, rather than postulated, it is straight
 tforw ard to mix seman tic and axiomatic rea soning. It is also
 forward to combine the constructs of Hoare logic with other
 applicationspecific notations. This is briefly illustrated for
 various logical constructs, including termination statements, VDMstyle
 `relational' correctness specifications, weakest precondition
 statements and dynamic logic formulae.
+ This thesis presents a type system combining MLstyle parametric
+ polymorphism and subtyping, with type inference, principal types,
+ and decidable type subsumption. Type inference is based on {\sl
+ biunification} , an analogue of unification that works with
+ subtyping constraints.
 The theory underlying the work presented here is well known. Our
 contribution is to propose a way of mechanizing this theory in a
 way that makes certain practical details work out smoothly .",
 paper = "Gord89.pdf"
 keywords = "printed"
+ Making this possible are several contributions, beginning with the
+ notion of an 'extensible' type system, in which an open world of
+ types is assumed, so that no typeable program becomes untypeable
+ by the addition of new types to the language. While previous
+ formulations of subtyping fail to be extensible, this thesis
+ shows that adopting a more algebraic approach can remedy this.
+ Using such an approach, this thesis develops the theory of
+ biunification, shows how it is used to infer types, and shows how
+ it can be efficiently implemented, exploiting deep connections
+ between the algebra of regular languages and polymorphic subtyping.",
+ paper = "Dola16.pdf"
}
\end{chunk}
\index{Guessarian, Irene}
\index{Meseguer, Jose}
+\index{Downen, Paul}
+\index{Maurer, Luke}
+\index{Ariola, Zena M.}
\begin{chunk}{axiom.bib}
@article{Gues87,
 author = "Guessarian, Irene and Meseguer, Jose",
 title = {{On the Axiomatization of 'IfTHENELSE'}},
 journal = "SIAM J. Comput.",
 volume = "16",
 number = "2",
 year = "1987",
 abstract =
 "The equationally complete proof system for 'ifthenelse' of Bloom
 and Tindell (this Journal, 12 (1983), pp. 677707) is extended to a
 complete proof system for manysorted algebras with extra operations,
 predicates and equations among those. We give similar completeness
 results for continuous algebras and program schemes (infinite trees)
 by the methods of algebraic semantics. These extensions provide a
 purely equational proof system to prove properties of functional
 programs over userdefinable data types.",
 paper = "Gues87.pdf"
+@inproceedings{Down16,
+ author = "Downen, Paul and Maurer, Luke and Ariola, Zena M.",
+ title = {{Sequent Calculus as a Compiler Intermediate Language}},
+ booktitle = "ICFP'16",
+ year = "2016",
+ pages = "74--88",
+ publisher = "ACM",
+ isbn = "9781450342193",
+ abstract =
+ "The $\lambda$calculus is popular as an intermediate language for
+ practical compilers. But in the world of logic it has a
+ lesserknown twin, born at the same time, called the {\sl sequent
+ calculus}. Perhaps that would make for a good intermediate
+ language, too? To explore this question we designed Sequent Core,
+ a practicallyoriented core calculus based on the sequent
+ calculus, and used it to reimplement a substantial chunk of the
+ Glasgow Haskell Compiler.",
+ paper = "Down16.pdf"
}
\end{chunk}
\index{Keimel, Klaus}
\index{Plotkin, Gordon D.}
+\index{Zeilberger, Noam}
\begin{chunk}{axiom.bib}
@article{Keim09,
 author = "Keimel, Klaus and Plotkin, Gordon D.",
 title = {{Predicate Transformers for Extended Probability and
 NonDeterminism}},
 journal = "Math. Struct. in Comp. Science",
 volume = "19",
 pages = "501539",
+@phdthesis{Zeil09,
+ author = "Zeilberger, Noam",
+ title = {{The Logical Basis of Evaluation Order and PatternMatching}},
+ school = "Carnegie Mellon University",
year = "2009",
+ link = "\url{https://www.cs.cmu.edu/~rwh/theses/zeilberger.pdf}",
+ abstract =
+ "An old and celebrated analogy says that writing programs is like
+ proving theorems. This analogy has been productive in both
+ directions, but in particular has demonstrated remarkable utility in
+ driving progress in programming languages, for example leading towards
+ a better understanding of concepts such as abstract data types and
+ polymorphism. One of the best known instances of the analogy actually
+ rises to the level of an isomorphism: between Gentzen’s natural
+ deduction and Church’s lambda calculus. However, as has been
+ recognized for a while, lambda calculus fails to capture some of the
+ important features of modern programming languages. Notably, it does
+ not have an inherent notion of evaluation order, needed to make sense
+ of programs with side effects. Instead, the historical descendents of
+ lambda calculus (languages like Lisp, ML, Haskell, etc.) impose
+ evaluation order in an ad hoc way.
+
+ This thesis aims to give a fresh take on the proofsasprograms
+ analogy—one which better accounts for features of modern programming
+ languages—by starting from a different logical foundation. Inspired
+ by Andreoli’s focusing proofs for linear logic, we explain how to
+ axiomatize certain canonical forms of logical reasoning through a
+ notion of pattern. Propositions come with an intrinsic polarity,
+ based on whether they are defined by patterns of proof, or by patterns
+ of refutation. Applying the analogy, we then obtain a programming
+ language with builtin support for patternmatching, in which
+ evaluation order is explicitly reflected at the level of types—and
+ hence can be controlled locally, rather than being an ad hoc, global
+ policy decision. As we show, different forms of continuationpassing
+ style (one of the historical tools for analyzing evaluation order)
+ can be described in terms of different polarizations. This language
+ provides an elegant, uniform account of both untyped and
+ intrinsicallytyped computation (incorporating ideas from infinitary
+ proof theory), and additionally, can be provided an extrinsic type
+ system to express and statically enforce more refined properties of
+ programs. We conclude by using this framework to explore the theory of
+ typing and subtyping for intersection and union types in the presence
+ of effects, giving a simplified explanation of some of the unusual
+ artifacts of existing systems.",
+ paper = "Zeil09.pdf"
+}
+
+\end{chunk}
+
+\index{Girard, JeanYves}
+\begin{chunk}{axiom.bib}
+@misc{Gira95,
+ author = "Girard, JeanYves",
+ title = {{Linear Logic: Its Syntax and Semantics}},
+ year = "1995",
+ paper = "Gira95.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Dolan, Stephen}
+\index{Mycroft, Alan}
+\begin{chunk}{axiom.bib}
+@article{Dola17,
+ author = "Dolan, Stephen and Mycroft, Alan",
+ title = {{Polymorphism, Subtyping, and Type Inference in MLsub}},
+ journal = "ACM SIGPLAN Notices",
+ volume = "52",
+ number = "1",
+ year = "2017",
+ pages = "6072",
+ link = "\url{https://www.cl.cam.ac.uk/~sd601/papers/mlsubpreprint.pdf}",
abstract =
 "We investigate laws for predicate transformers for the combination of
 nondeterministic choice and (extended) probabilistic choice, where
 predicates are taken to be functions to the extended nonnegative
 reals, or to closed intervals of such reals. These predicate
 transformers correspond to state transformers, which are functions to
 conical powerdomains, which are the appropriate powerdomains for the
 combined forms of nondeterminism. As with standard powerdomains for
 nondeterministic choice, these come in three flavours – lower, upper
 and (order)convex – so there are also three kinds of predicate
 transformers. In order to make the connection, the powerdomains are
 first characterised in terms of relevant classes of functionals.
+ "We present a type system combining subtyping and MLstyle parametric
+ polymorphism. Unlike previous work, our system supports type inference
+ and has compact principal types. We demonstrate this system in the
+ minimal language MLsub, which types a strict superset of core ML
+ programs.
 Much of the development is carried out at an abstract level, a kind of
 domaintheoretic functional analysis: one considers dcones, which are
 dcpos equipped with a module structure over the nonnegative extended
 reals, in place of topological vector spaces. Such a development still
 needs to be carried out for probabilistic choice per se ; it would
 presumably be necessary to work with a notion of convex space rather
 than a cone.",
 paper = "Keim09.pdf"
+ This is made possible by keeping a strict separation between the types
+ used to describe inputs and those used to describe outputs, and
+ extending the classical unification algorithm to handle subtyping
+ constraints between these input and output types. Principal types are
+ kept compact by type simplification, which exploits deep connections
+ between subtyping and the algebra of regular languages. An
+ implementation is available online.",
+ paper = "Dola17.pdf"
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur j.}
\index{Wadsworth, Christopher P.}
+\index{BonnaireSergeant, Ambrose}
\begin{chunk}{axiom.bib}
@article{Gord79,
 author = "Gordon, Michael J. and Milner, Arthur j. and
 Wadsworth, Christopher P.",
 title = {{Edinburgh LCF, A Mechanised Logic of Computation:
 Introduction}},
 journal = "LNCS",
 volume = "78",
 year = "1979",
 paper = "Gord79.pdf"
+@misc{Bonn18,
+ author = "BonnaireSergeant, Ambrose",
+ title = {{Are unsound type systems wrong?}},
+ link =
+"\url{http://frenchy64.github.io/2018/04/07/unsoundnessinuntypedtypes.html}",
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur j.}
\index{Wadsworth, Christopher P.}
+\index{Tuhola, Henri}
\begin{chunk}{axiom.bib}
@article{Gord79a,
 author = "Gordon, Michael J. and Milner, Arthur j. and
 Wadsworth, Christopher P.",
 title = {{Edinburgh LCF, A Mechanised Logic of Computation: ML}},
 journal = "LNCS",
 volume = "78",
 year = "1979",
 paper = "Gord79a.pdf"
+@misc{Tuho18,
+ author = "Tuhola, Henri",
+ title = {{An another MLsub study}},
+ link = "\url{http://boxbase.org/entries/2018/mar/12/mlsubstudy/}",
+ year = "2018",
+ comment = "Subtyping does not solve the problem of coercion"
}
\end{chunk}
\index{Gordon, Michael J.}
\index{Milner, Arthur j.}
\index{Wadsworth, Christopher P.}
+\index{Tuhola, Henri}
\begin{chunk}{axiom.bib}
@article{Gord79b,
 author = "Gordon, Michael J. and Milner, Arthur j. and
 Wadsworth, Christopher P.",
 title = {{Edinburgh LCF, A Mechanised Logic of Computation: PPLAMBDA}},
+@misc{Tuho18a,
+ author = "Tuhola, Henri",
+ title = {{Boxbase  Index}},
+ link = "\url{http://boxbase.org/}",
+ year = "2018",
+ keywords = "axiomref"
+}
+
+\end{chunk}
+
+\index{Tarditi, David}
+\index{Acharya, Anurag}
+\index{Lee, Peter}
+\begin{chunk}{axiom.bib}
+@techreport{Tard90,
+ author = "Tarditi, David and Acharya, Anurag and Lee, Peter",
+ title = {{No Assembly Required: Compiling Standard ML to C}},
+ type = "technical report",
+ institution = "Carnegie Mellon University",
+ number = "CMUCS90187",
+ year = "1990",
+ abstract =
+ "C has been proposed as a portable target language for
+ implementing higherorder languages. Previous efforts at compiling
+ such languages to C have produced efficient code, but have had to
+ compromise on either the portability or the preservation of the
+ tailrecursive properties of the languages. We assert that neither
+ of these compromises is necessary for the generation of efficient
+ code. We offer a Standard ML to C compiler, which does not make
+ either of these compromises, as an existence proof. The generated
+ code achieves an execution speed that is just a factor of two
+ slower than the best native code compiler. In this paper, we
+ describe the design, implementation and the performance of this
+ compiler.",
+ paper = "Tard90.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\index{Jim, Trevor}
+\begin{chunk}{axiom.bib}
+@techreport{Appe88,
+ author = "Appel, Andrew W. and Jim, Trevor",
+ title = {{ContinuationPassing, ClosurePassing Style}},
+ institution = "Princeton University",
+ year = "1988",
+ number = "CSTR18388",
+ abstract =
+ "We implemented a continuationpassing style (CPS) code generator for
+ ML. Our CPS language is represented as an ML datatype in which all
+ functions are named and most kinds of illformed expressions are
+ impossible. We separate the code generation into phases that rewrite
+ this representation into eversimpler forms. Closures are represented
+ explicitly as records, so that closure strategies can be communicated
+ from one phase to another. No stack is used. Our benchmark data shows
+ that the new method is an improvement over our previous,
+ abstractmachine based code generator.",
+ paper = "Appe88.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Appel, Andrew W.}
+\index{MacQueen, David B.}
+\begin{chunk}{axiom.bib}
+@article{Appe87,
+ author = "Appel, Andrew W. and MacQueen, David B.",
+ title = {{A Standard ML Compiler}},
journal = "LNCS",
 volume = "78",
 year = "1979",
 paper = "Gord79b.pdf"
+ volume = "274",
+ pages = "301324",
+ year = "1987",
+ abstract =
+ "Standard ML is a major revision of earlier dialects of the functional
+ language ML. We describe the first compiler written for Standard ML in
+ Standard ML. The compiler incorporates a number of novel features and
+ techniques, and is probably the largest system written to date in
+ Standard ML.
+
+ Great attention was paid to modularity in the construction of the
+ compiler, leading to a successful largescale test of the modular
+ capabilities of Standard ML. The front end is useful for purposes
+ other than compilation, and the back end is easily retargetable (we
+ have code generators for the VAX and MC68020). The module facilities
+ of Standard ML were taken into account early in the design of the
+ compiler, and they particularly influenced the environment management
+ component of the front end. For example, the symbol table structure is
+ designed for fast access to opened structures.
+
+ The front end of the compiler is a single phase that integrates
+ parsing, environment management, and type checking. The middle end
+ uses a sophisticated decision tree scheme to produce efficient
+ pattern matching code for functions and case expressions. The abstract
+ syntax produced by the front end is translated into a simple
+ lambdacalculusbased intermediate representation that lends itself to
+ easy case analysis and optimization in the code generator. Special
+ care was taken in designing the runtime data structures for fast
+ allocation and garbage collection.
+
+ We describe the overall organization of the compiler and present some
+ of the data representations and algorithms used in its various
+ phases. We conclude with some lessons learned about the ML language
+ itself and about compilers for modern functional languages.",
+ paper = "Appe87.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Lewis, Robert Y.}
+\index{Steele, Guy Lewis}
\begin{chunk}{axiom.bib}
@article{Lewi17,
 author = "Lewis, Robert Y.",
 title = {{An Extensible Ad Hoc Interface between Lean and Mathematica}},
 journal = "EPTCS",
 volume = "262",
 pages = "2337",
 year = "2017",
+@techreport{Stee78,
+ author = "Steele, Guy Lewis",
+ title = {{RABBIT: A Compiler for SCHEME}},
+ type = "technical report",
+ number = "AITR474",
+ year = "1978",
abstract =
 "We implement a userextensible ad hoc connection between the Lean
 proof assistant and the computer algebra system Mathematica. By
 reflecting the syntax of each system in the other and providing a
 flexible interface for extending translation, our connection allows
 for the exchange of arbitrary information between the two systems. We
 show how to make use of the Lean metaprogramming framework to verify
 certain Mathematica computations, so that the rigor of the proof
 assistant is not compromised.",
 paper = "Lewi17.pdf",
 keywords = "axiomref,printed"
+ "We have developed a compiler for the lexicallyscoped dialect of LISP
+ known as SCHEME. The compiler knows relatively little about specific
+ data manipulation primitives such as arithmetic operators, but
+ concentrates on general issues of environment and control. Rather than
+ having specialized knowledge about a large variety of control and
+ environment constructs, the compiler handles only a small basis set
+ which reflects the semantics of lambdacalculus. All of the
+ traditional imperative constructs, such as sequencing, assignment,
+ looping, GOTO, as well as many standard LISP constructs such as AND,
+ OR, and COND, are expressed in macros in terms of the applicative
+ basis set. A small number of optimization techniques, coupled with the
+ treatment of function calls as GOTO statements, serve to produce code
+ as good as that produced by more traditional compilers. The macro
+ approach enables speedy implementation of new constructs as desired
+ without sacrificing efficiency in the generated code.
+
+ A fair amount of analysis is devoted to determining whether
+ environments may be stackallocated or must be heapallocated.
+ Heapallocated environments are necessary in general because SCHEME
+ (unlike Algol 60 and Algol 68, for example) allows procedures with
+ free lexically scoped variables to be returned as the values of other
+ procedures; the Algol stackallocation environment strategy does not
+ suffice. The methods used here indicate that a heapallocating
+ generalization of the 'display' technique leads to an efficient
+ implementation of such 'upward funargs'. Moreover, compiletime
+ optimization and analysis can eliminate many 'funargs' entirely, and
+ so far fewer environment structures need be allocated at run time than
+ might be expected.
+
+ A subset of SCHEME (rather than triples, for example) serves as the
+ representation intermediate between the optimized SCHEME code and the
+ final output code; code is expressed in this subset in the socalled
+ continuationpassing style. As a subset of SCHEME, it enjoys the same
+ theoretical properties; one could even apply the same optimizer used
+ on the input code to the intermediate code. However, the subset is so
+ chosen that all temporary quantities are made manifest as variables,
+ and no control stack is needed to evaluate it. As a result, this
+ apparently applicative representation admits an imperative
+ interpretation which permits easy transcription to final imperative
+ machine code. These qualities suggest that an applicative language
+ like SCHEME is a better candidate for an UNCOL than the more
+ imperative candidates proposed to date.",
+ paper = "Stee78.pdf"
}
\end{chunk}
\index{Clint, M.}
\index{Hoare, C.A.R}
+\index{PeytonJones, Simon}
\begin{chunk}{axiom.bib}
@article{Clin72,
 author = "Clint, M. and Hoare, C.A.R",
 title = {{Program Proving: Jumps and Functions}},
 journal = "Acta Informatica",
 volume = "1",
 pages = "214224",
 year = "1972",
+@misc{Peyt17,
+ author = "PeytonJones, Simon",
+ title = {{Escape from the ivory tower: the Haskell journey}},
+ link = "\url{https://www.youtube.com/watch?v=re96UgMk6GQ}",
+ year = "2017"
+}
+
+\end{chunk}
+
+\index{Wadler, Philip}
+\begin{chunk}{axiom.bib}
+@article{Wadl92,
+ author = "Wadler, Philip",
+ title = {{Comprehending Monads}},
+ journal = "Mathematical Structures in Computer Science",
+ volume = "2",
+ pages = "461493",
+ year = "1992",
abstract =
 "Proof methods adequate for a wide range of computer programs have
 been expounded in [1] and [2]. This paper develops a method suitable
 for programs containing functions, and a certain kind of jump. The
 method is illustrated by the proof of a useful and efficient program
 for table lookup by logarithmic search.",
 paper = "Clin72.pdf"
+ "Category theorists invented {\sl monads} in the 1960s to
+ concisely express certain aspects of universal algebra. Functional
+ programmers invented {\sl list comprehensions} in the 1970s to
+ concisely express certain programs involving lists. This paper
+ shows how list comprehensions may be generalised to an arbitrary
+ monad, and how the resulting programming feature can concisely
+ express in a pure functional language some programs that
+ manipulate state, handle exceptions, parse text, or invoke
+ continuations. A new solution to the old problem of destructive
+ array update is also presented. No knowledge of category theory is
+ assumed.",
+ paper = "Wadl92.pdf"
}
\end{chunk}
\index{Floyd, Robert W.}
+\index{PeytonJones, Simon}
+\index{Wadler, Philip}
\begin{chunk}{axiom.bib}
@article{Floy64,
 author = "Floyd, Robert W.",
 title = {{Algorithm 245: Treesort}},
 journal = "CACM",
 volume = "7",
 number = "12",
 year = "1964",
 pages = "701",
 paper = "Floy64.pdf"
+@inproceedings{Peyt93,
+ author = "PeytonJones, Simon and Wadler, Philip",
+ title = {{Imperative Functional Programming}},
+ booktitle = "Principles of Programming Languages POPL'93",
+ publisher = "ACM",
+ year = "1993",
+ pages = "7184",
+ abstract =
+ "We present a new model, based on monads, for performing
+ input/output in a nonstrict, purely functional language. It is
+ composable, extensible, efficient, requires no extensions to the
+ type system, and extends smoothly to incorporate mixedlanguage
+ working and inplace array updates.",
+ paper = "Peyt93.pdf"
}
\end{chunk}
\index{Dijkstra, E.W.}
+\index{Hughes, John}
\begin{chunk}{axiom.bib}
@incollection{Dijk72,
 author = "Dijkstra, E.W.",
 title = {{Notes on Structured Programming}},
 booktitle = "Structured Programming",
 publisher = "Academic Press",
 year = "1972",
 pages = "182"
+@inproceedings{Hugh90,
+ author = "Hughes, John",
+ title = {{Why Functional Programming Matters}},
+ booktitle = "Research Topics in Functional Programming",
+ publisher = "AddisonWesley",
+ year = "1990",
+ pages = "1742",
+ abstract =
+ "As software becomes more and more complex, it is more and more
+ important to structure it well. Wellstructured software is easy to
+ write and to debug, and provides a collection of modules that can be
+ reused to reduce future programming costs. In this paper we show that
+ two fea tures of functional languages in particular, higherorder
+ functions and lazy evaluation, can contribute significantly to
+ modularity. As examples, we manipulate lists and trees, program
+ several numerical algorithms, and implement the alphabeta heuristic
+ (an algorithm from Artificial Intelligence used in gameplaying
+ programs). We conclude that since modularity is the key to successful
+ programming, functional programming offers important advantages for
+ software development.",
+ paper = "Hugh90.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Hoare, C.A.R}
+\index{R\'emy, Didier}
\begin{chunk}{axiom.bib}
@incollection{Hoar72,
 author = "Hoare, C.A.R",
 title = {{Notes on Data Structuring}},
 booktitle = "Structured Programming",
 publisher = "Academic Press",
 year = "1972",
 pages = "83174"
+@misc{Remy17,
+ author = "Remy, Didier",
+ title = {{Type Systems for Programming Languages}},
+ year = "2017",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours1.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours2.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours3.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours4.pdf}",
+ link = "\url{http://gallium.inria.fr/~remy/mpri/cours5.pdf}",
+ paper = "Remy17.tgz"
}
\end{chunk}
\index{Dahl, OleJohan}
\index{Hoare, C.A.R}
+\index{Wadler, Philip}
\begin{chunk}{axiom.bib}
@incollection{Dahl72,
 author = "Dahl, OleJohan and Hoare, C.A.R",
 title = {{Hierachical Program Structure}},
 booktitle = "Structured Programming",
 publisher = "Academic Press",
 year = "1972",
 pages = "175220""
+@article{Wadl07,
+ author = "Wadler, Philip",
+ title = {{The GirardReynolds isomorphism (second edition)}},
+ journal = "Theoretical Computer Science",
+ volume = "375",
+ pages = "201226",
+ year = "2007",
+ abstract =
+ "Jean-Yves Girard and John Reynolds independently discovered the
+ secondorder polymorphic lambda calculus, F2. Girard additionally
+ proved a Representation Theorem : every function on natural numbers
+ that can be proved total in secondorder intuitionistic predicate
+ logic, P2, can be represented in F2. Reynolds additionally proved an
+ Abstraction Theorem : every term in F2 satisfies a suitable notion of
+ logical relation; and formulated a notion of parametricity satisfied
+ by wellbehaved models.
+
+ We observe that the essence of Girard’s result is a projection from P2
+ into F2, and that the essence of Reynolds’s result is an embedding of
+ F2 into P2, and that the Reynolds embedding followed by the Girard
+ projection is the identity. We show that the inductive naturals are
+ exactly those values of type natural that satisfy Reynolds’s notion of
+ parametricity, and as a consequence characterize situations in which
+ the Girard projection followed by the Reynolds embedding is also the
+ identity.
+
+ An earlier version of this paper used a logic over untyped terms. This
+ version uses a logic over typed term, similar to ones considered by
+ Abadi and Plotkin and by Takeuti, which better clarifies the
+ relationship between F2 and P2.",
+ paper = "Wadl07.pdf"
}
\end{chunk}
\index{Lamport, Leslie}
\index{Owicki, Susan}
+\index{Morrisett, Greg}
+\index{Walker, David}
+\index{Crary, Karl}
+\index{Glew, Neil}
\begin{chunk}{axiom.bib}
@article{Lamp81,
 author = "Lamport, Leslie and Owicki, Susan",
 title = {{Program Logics and Program Verification}},
 journal = "LNCS",
 volume = "131",
 pages = "197199",
 year = "1981",
 paper = "Lamp81.pdf",
+@article{Morr99,
+ author = "Morrisett, Greg and Walker, David and Crary, Karl and Glew, Neil",
+ title = {{From System F to Typed Assembly Language}},
+ journal = "Trans. on Programming Languages and Systems TOPLAS",
+ volume = "21",
+ number = "3",
+ year = "1999",
+ pages = "527568",
+ abstract =
+ "We motivate the design of typed assembly language (TAL) and present a
+ type-preserving translation from System F to TAL. The typed assembly
+ language we present is based on a conventional RISC assembly
+ language, but its static type system provides support for enforcing
+ high-level language abstractions, such as closures, tuples, and
+ user-defined abstract data types. The type system ensures that
+ well-typed programs cannot violate these abstractions. In addition,
+ the typing constructs admit many low-level compiler optimizations. Our
+ translation to TAL is specified as a sequence of type-preserving
+ transformations, including CPS and closure conversion phases;
+ type-correct source programs are mapped to type-correct assembly
+ language. A key contribution is an approach to polymorphic closure
+ conversion that is considerably simpler than previous work. The
+ compiler and typed assembly language provide a fully automatic way to
+ produce certified code, suitable for use in systems where untrusted
+ and potentially malicious code must be checked for safety before
+ execution.",
+ paper = "Morr99.pdf",
keywords = "printed"
}
\end{chunk}
\index{Glasner, Ingrid}
\index{Loeckx, Jacquest}
+\index{Kranz, David}
+\index{Kelsey, Richard}
+\index{Rees, Jonathan}
+\index{Hudak, Paul}
+\index{Philbin, James}
+\index{Adams, Norman}
\begin{chunk}{axiom.bib}
@article{Glas78,
 author = "Glasner, Ingrid and Loeckx, Jacquest",
 title = {{A Calculus for Proving Properties of WhilePrograms}},
 journal = "LNCS",
 volume = "75",
 pages = "252281",
 year = "1978",
 paper = "Glas78.pdf",
+@inproceedings{Kran86,
+ author = "Kranz, David and Kelsey, Richard and Rees, Jonathan and
+ Hudak, Paul and Philbin, James and Adams, Norman",
+ title = {{ORBIT: An Optimizing Compiler for Scheme}},
+ booktitle = "SIGPLAN '86",
+ publisher = "ACM",
+ pages = "219233",
+ year = "1986",
+ abstract =
+ "In this paper we describe an optimizing compiler for Scheme
+ called {\sl Orbit} that incorporates our experience with an
+ earlier Scheme compiler called TC, together with some ideas from
+ Steele's Rabbit compiler. The three main design goals have been
+ correctness, generating very efficient compiled code, and
+ portability.",
+ paper = "Kran86.pdf",
keywords = "printed"
}
\end{chunk}
\index{Cartwright, Robert}
\index{McCarthy, John}
+\index{Flanagan, Cormac}
+\index{Sabry, Amr}
+\index{Duba, Bruce F.}
+\index{Felleisen, Matthias}
\begin{chunk}{axiom.bib}
@article{Cart78,
 author = "Cartwright, Robert and McCarthy, John",
 title = {{Recursive Programs as Functions in a First Order Theory}},
 journal = "LNCS",
 volume = "75",
 pages = "576629",
 year = "1978",
 abstract =
 "Pure Lisp style recursive function programs are represented in a new
 way by sentences and schemata of first order logic. This permits easy
 and natural proofs of extensional properties of such programs by
 methods that generalize structural induction. It also systematizes
 known methods such as recursion induction, subgoal induction,
 inductive assertions by interpreting them as first order axiom
 schemata. We discuss the metatheorems justifying the representation
 and techniques for proving facts about specific programs. We also give
 a simpler version of the G{\"o}del-Kleene way of representing computable
 functions by first order sentences.",
 paper = "Cart78.pdf"
+@misc{Flan03,
+ author = "Flanagan, Cormac and Sabry, Amr and Duba, Bruce F. and
+ Felleisen, Matthias",
+ title = {{The Essence of Compiling with Continuations}},
+ link =
+ "\url{https://www.cs.rice.edu/~javaplt/411/17spring/Readings/essenceretro.pdf}",
+ paper = "Flan03.pdf",
+ keywords = "printed"
}
\end{chunk}
\index{Cartwright, Robert}
+\index{Harper, Robert}
+\index{Lillibridge, Mark}
\begin{chunk}{axiom.bib}
@phdthesis{Cart76,
 author = "Cartwright, Robert",
 title = {{A Practical Formal Semantic Definition and Verification
 System for Typed LISP}},
 school = "Stanford Artificial Intelligence Labs",
 year = "1976",
 abstract =
 "Despite the fact that computer scientists have developed a variety of
 formal methods for proving computer programs correct, the formal
 verification of a nontrivial program is still a formidable
 task. Moreover, the notion of proof is so imprecise in most existing
 verification systems, that the validity of the proofs generated is
 open to question. With an aim toward rectifying these problems, the
 research discussed in this dissertation attempts to accomplish the
 following objectives:
 \begin{enumerate}
 \item To develop a programming language which is sufficiently powerful
 to express many interesting algorithms clearly and succintly, yet
 simple enough to have a tractable formal semantic definition
 \item To completely specify both proof theoretic and model theoretic
 formal semantics for this language using the simplest possible
 abstractions
 \item To develop an interactive program verification system for the
 language which automatically performs as many of the straightforward
 steps in a verification as possible
 \end{enumerate}",
 paper = "Cart76.pdf",
}

\end{chunk}

\index{O'Donnell, Michael J.}
\begin{chunk}{axiom.bib}
@article{Odon81,
 author = "O'Donnell, Michael J.",
 title = {{A Critique of the Foundations of Hoarestyle Programming Logics}},
 journal = "LNCS",
 volume = "131",
 pages = "349374",
 year = "1981",
 abstract =
 "Much recent discussion in computing journals has been devoted to
 arguments about the feasibility and usefulness of formal
 verification methods for increasing confidence in computer
 programs. Too little attention has been given to precise criticism
 of specific proposed systems for reasoning about programs. Whether
 such systems are to be used for formal verification, by hand or
 automatically, or as a rigorous foundation for informal reasoning,
 automatically, or as a rigorous foundation for imformal reasoning,
 it is essential that they be logically sound. Several popular
 rules in the Hoare language are in fact not sound. These rules
 have been accepted because they have not been subjected to
 sufficiently strong standards of correctness. This paper attempts
 to clarify the different technical definitions of correctness of a
 logic, to show that only the strongest of these definitions is
 acceptable for Hoare logic, and to correct some of the unsound
 rules which have appeared in the literature. The corrected rules
 are given merely to show that it is possible to do so. Convenient
 and elegant rules for reasoning about certain programming
 constructs will probably require a more flexible notation than
 Hoare's.",
 paper = "Odon81.pdf",
+@inproceedings{Harp93b,
+ author = "Harper, Robert and Lillibridge, Mark",
+ title = {{Explicit Polymorphism and CPS Conversion}},
+ booktitle = "Symp. of Principles of Programming Languages",
+ publisher = "ACM Press",
+ year = "1993",
+ pages = "206--219",
+ abstract =
+ "We study the typing properties of CPS conversion for an extension
+ of F$\omega$ with control operators. Two classes of evaluation
+ strategies are considered, each with callbyname and callbyvalue
+ variants. Under the 'standard' strategies, constructor abstractions
+ are values, and constructor applications can lead to nontrivial
+ control effects. In contrast, the 'MLlike' strategies evaluate
+ beneath constructor abstractions, reflecting the usual interpretation
+ of programs in languages based on implicit polymorphism. Three
+ continuation passing style sublanguages are considered, one on which
+ the standard strategies coincide, one on which the MLlike
+ strategies coincide, and one on which all the strategies coincide.
+ Compositional, typepreserving CPS transformation algorithms are
+ given for the standard strategies, resulting in terms on which all
+ evaluation strategies coincide. This has as a corollary the
+ soundness and termination of welltyped programs under the standard
+ evaluation strategies. A similar result is obtained for the MLlike
+ callbyname strategy . In contrast, such results are obtained for
+ the callbyvalue MLlike strategy only for a restricted
+ sublanguage in which constructor abstractions are limited to
+ values.",
+ paper = "Harp93b.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Sabry, Amr}
+\index{Felleisen, Matthias}
+\begin{chunk}{axiom.bib}
+@techreport{Sabr92,
+ author = "Sabry, Amr and Felleisen, Matthias",
+ title = {{Reasoning About Programs in ContinuationPassing Style}},
+ type = "technical report",
+ number = "TR 92180",
+ institution = "Rice University",
+ abstract =
+ "Plotkin's $\lambda$value calculus is sound but incomplete for
+ reasoning about $\beta\nu$transformations on programs in continuation
+ passing style (CPS). To find a complete extension, we define a new,
+ compactifying CPS transformation and an 'inverse' mapping,
+ $un$CPS, both of which are interesting in their own right. Using the
+ new CPS transformation, we can determine the precise language of CPS
+ terms closed under $\beta\nu$ transformations. Using the $un$CPS
+ transformation, we can derive a set of axioms such that every equation
+ between source programs is provable if and only if $\beta\nu$ can
+ prove the corresponding equation between CPS programs. The extended
+ calculus is equivalent to an untyped variant of Moggi's computational
+ $\lambda$calculus.",
+ paper = "Sabr92.pdf",
keywords = "printed"
}
\end{chunk}
\index{Manna, Zohar}
\index{Waldinger, Richard}
\begin{chunk}{axiom.bib}
@article{Mann78a,
 author = "Manna, Zohar and Waldinger, Richard",
 title = {{Is 'sometime' sometimes better than 'always'?}},
 journal = "CACM",
 volume = "21",
 number = "2",
 year = "1978",
 abstract =
 "This paper explores a technique for proving the correctness and
 termination of programs simultaneously. This approach, the
 {\sl intermittentassertion method}, involves documenting the
 program with assertions that must be true at some time when
 control passes through the corresponding point, but that need not
 be true every time. The method, introduced by Burstall, promises
 to provide a valuable complement to the more conventional methods.

 The intermittentassertion method is presented with a number of
 examples of correctness and termination proofs. Some of these
 proofs are markedly simpler than their conventional
 counterparts. On the other hand, it is shown that a proof of
 correctness or termination by any of the conventional techniques
 can be rephrased directly as a proof using intermittent
 assertions. Finally it is shown how the intermittent assertion
 method can be applied to prove the validity of program
 transformations and the correctness of continuously operating
 systems.",
 paper = "Mann78a.pdf, printed"
}

\end{chunk}

\index{Katz, Shmuel}
\index{Manna, Zohar}
\begin{chunk}{axiom.bib}
@article{Katz75,
 author = "Katz, Shmuel and Manna, Zohar",
 title = {{A Closer Look at Termination}},
 journal = "Acta Informatica",
 volume = "5",
 pages = "333352",
 year = "1975",
abstract =
 "Several methods for proving that computer programs terminate are
 presented and illustrated. The methods considered involve (a)
 using the 'noinfinitelydescendingchain' property of
 wellfounded sets (Floyd's approach), (b) bounding a counter
 associated with each loop ({\sl loop} approach), (c) showing that
 some exit of each loop must be taken ({\sl exit} approach), or (d)
 inducting on the structure of the data domain (Burstall's
 approach). We indicate the relative merit of each method for
 proving termination or nontermination as an integral part of an
 automatic verification system.",
 paper = "Katz75.pdf",
+ keywords = "printed"
+}
+
+\end{chunk}
+
+\index{Phelps, Tom}
+\begin{chunk}{axiom.bib}
+@misc{Phel92,
+ author = "Phelps, Tom",
+ title = {{A Common Lisp CGOL}},
+ year = "1992",
+ link = "\url{https://people.eecs.berkeley.edu/~fateman/cgol/cgol.1/cgol.ps}",
+ abstract =
+ "CGOL is an Algol-like notation for Lisp. The original version,
+ written by Vaughan Pratt at M.I.T. in the early 70s was written in
+ Maclisp. This new version is based on Common Lisp. CGOL was translated
+ to Common Lisp in the following four-stage process:
+
+ (1) cgol.tok, the tokenizer has been almost completely rewritten; (2)
+ cgol1.l, the main translation loop with library of translation schemas
+ has been converted from Maclisp to Common Lisp; (3) the code that
+ cgol1.l produces has been converted to Common Lisp; (4) selected
+ examples of CGOL programs themselves were rewritten, since certain
+ aspects of the semantics of Maclisp would otherwise not be modelled in
+ the expected fashion. Maclisp differs from Common Lisp in a variety of
+ respects, and some of them are apparent from CGOL including direct
+ escapes to Lisp, variable scoping, function definitions and numerous
+ other aspects.
+
+ In contrast to the programming described above, the major contribution
+ of this paper is annotation of selected code from the CGOL
+ translator.",
+ paper = "Phel92.pdf"
+}
+
+\end{chunk}
+
+\index{Duran, Antonio J.}
+\index{Perez, Mario}
+\index{Varona, Juan L.}
+\begin{chunk}{axiom.bib}
+@article{Dura14,
+ author = "Duran, Antonio J. and Perez, Mario and Varona, Juan L.",
+ title = {{The Misfortunes of a Trio of Mathematicians Using Computer
+ Algebra Systems. Can We Trust in Them?}},
+ journal = "Notices of the AMS",
+ volume = "61",
+ number = "10",
+ year = "2014",
+ link = "\url{http://www.ams.org/notices/201410/rnoti-p1249.pdf}",
+ pages = "1249-1252",
+ paper = "Dura14.pdf",
keywords = "printed"
}
diff git a/src/axiomwebsite/patches.html b/src/axiomwebsite/patches.html
index 4cf5aa5..a19b687 100644
 a/src/axiomwebsite/patches.html
+++ b/src/axiomwebsite/patches.html
@@ 5924,6 +5924,8 @@ books/bookvolbib add references
books/bookvol13 add chapter Here is the Problem
20180401.01.tpd.patch
books/bookvolbib add references
+20180415.01.tpd.patch
+books/bookvolbib add references

1.9.1