diff --git a/doc/proving.ivy b/doc/proving.ivy new file mode 100644 index 0000000..5388cbd --- /dev/null +++ b/doc/proving.ivy @@ -0,0 +1,780 @@ +#lang ivy1.7 + +# IVy as a theorem prover +# ======================= + +# In the development of systems, we sometimes have to reason about +# mathematical functions and relations in ways that automated theorem +# provers can't handle reliably. For these cases, IVy provides a +# facility that allows the user to supply the necessary proofs. IVy's +# approach to theorem proving is designed to make maximal use of +# automated provers. We do this by localizing the proof into +# "isolates". Our verification conditions in each isolate are confined +# to a logical fragment that an automated prover can handle reliably. +# +# +# Primitives +# ========== +# +# A logical development in IVy is a succession of statements or +# *judgments*. Each judgment must be justified by a primitive axiom or +# inference rule. +# +# Type declarations +# ----------------- +# +# On such primitive is a type declaration, like this: +# + type t +# +# This can be read as "let `t` be a type". This judgement is admissible +# provided `t` is new symbol that has not been used up to this point +# in the development. +# +# Functions and individuals +# ------------------------- +# +# We can introduce a constant like this: + + individual n : t + +# where `n` is new. This can be read as "let `n` be of type +# `t`". Every type has at least one element in it. This judgement gives +# a name to such an element. +# +# Similarly, we can introduce new function and relation symbols: + + function f(X:t) : t + relation r(X:t,Y:t) + +# The first can be read as "for all *X* of type *t*, let *f*(*X*) be of type *t*". +# +# Axioms +# ------ +# +# An *axiom* is a statement that is admitted by fiat. For example: + + axiom [symmety_r] r(X,Y) -> r(Y,X) + +# The free variable *X* in the formula is taken as universally +# quantified. 
The text `symmetry_r` between brackets is a name for the +# axiom and is optional. The axiom is simply admitted in the current +# context without proof. Axioms are dangerous, since they can introduce +# inconsistencies. You should use axioms only if you are developing a +# foundational theory and you know what you are doing. +# +# Properties +# ---------- +# +# A *property* is a statement that can be admitted only if it follows +# logically from judgments in the current context. For example: + + property [myprop] r(n,X) -> r(X,n) + +# A property requires a proof (see below). If a proof is not supplied, +# IVy applies its proof tactic `auto`. This calls the automated prover +# Z3 to attempt to prove the property from the previously admitted +# judgments in the current context. +# +# Definitions +# ----------- +# +# A definition is a special form of axiom that cannot introduce an +# inconsistency. As an example: + + function g(X:t) : t + + definition g(X) = f(X) + 1 + +# This is read as "for all *X*, let *g*(*X*) be *f*(*X*) + 1". The +# definition is admissible if the symbol *g* is "fresh", meaning it does +# not occur in any existing properties or definitions. Further *g* must +# not occur on the right-hand side of the equality (that is, a recursive +# definition is not admissible without proof -- see "Recursion" below). +# +# Theory instantiations +# --------------------- +# +# IVy has built-in theories that can be instantated with a specific type +# as their carrier. For example, the theory of integer arithmetic is +# called `int`. It has the signature `{+,-,*,/,<}`, plus the integer +# numerals, and provides the axioms of Peano arithmetic. To instantiate +# the theory `int` using type *t* for the integers, we write: + + type u + interpret u -> int + +# This declaration requires that type `u` is not previously interpreted +# and that the symbols `{+,-,*,/,<}` in the signature of `int` are +# fresh. Notice, though, that the symbols `{+,-,*,/,<}` are +# overloaded. 
This means that `+` over distinct types is considered to +# be a distinct symbol. Thus, we can we can instantiate the `int` +# theory over different types (and also instantiate other theories that +# have these symbols in their signature). +# +# Schemata +# ======== +# +# A *schema* is a compound judgment that takse a collection of judgments +# as input (the premises) and produces a judgment as output (the +# conclusion). If the schema is valid, then we can provide any +# type-correct values for the premises and the conclusion will follow. +# +# Here is a simple example of a schema taken as an axiom: + + axiom [congruence] { + type d + type r + function f(X:d) : r + #-------------------------- + property X = Y -> f(X) = f(Y) + } + +# The schema is contained in curly brackets and gives a list of premises +# following a conclusion. In this case, it says that, given types *d* and *r* and a function *f* from +# *d* to *r* and any values *X*,*Y* of type *d*, we can infer that *X*=*Y* implies +# *f*(*X*) = *f*(*Y*). The dashes separating the premises from the conclusion are +# just a comment. The conclusion is always the last judgement in the rule. +# +# The keyword `axiom` tells IVy that this rule should be taken as a +# given, without proof. However, as we will see, the default `auto` +# tactic treats a schema differently from a simple proposition. That is, +# a schema is never used by default, but instead must be explicitly +# instantiated. This allows is to express and use facts that, if they +# occurred in a verification condition, would take us outside the +# decidable fragment. +# +# Any judgment that has been admitted in the current context can be +# *applied* in a proof. When we apply a schema, we supply values for its +# premises to infer its conclusion. +# + + property [prop_n] Z = n -> Z + 1 = n + 1 + proof + apply congruence; + showgoals + +# The `proof` declaration tells IVy to apply the axiom schema `congruence` to prove the property. 
+# IVy tries to match the proof goal `prop_n` to the schema's conclusion by picking particular +# values for premises, that is, the types *d*,*r* function *f*. It also chooses values for the +# the free variables *X*,*Y* occurring in the schema. In this case, it +# discovers the following assignment: +# +# # d = t +# # r = t +# # X = Z +# # Y = n +# # f(N) = N + 1 +# +# After plugging in this assignment, the conclusion of the rule exactly matches +# the property to be proved, so the property is admitted. +# +# The problem of finding an assignment such as the one above is one of +# "second order matching". It is a hard problem, and the answer is not +# unique. In case IVy did not succeed in finding the above match, we +# could also write the proof more explicitly, like this: + + property [prop_n_2] Z = n -> Z + 1 = n + 1 + proof + apply congruence with X=Z, Y=n, f(X) = X:t + 1 + +# Each of the above equations acts as a constraint on the +# assignment. That is, it must convert *X* to +# *Z*, *Y* to *n* and *f*(*X*) to *X* + 1. Notice that we had to +# explicitly type *X* on the right-hand side of the last equation, +# since its type couldn't be inferred (and in fact it's not the same +# as the type of *X* on the left-hand side, which is *d*). +# +# It's also possible to write constraints that do not allow for any +# assignment. In this case, Ivy complains that the provided match is +# inconsistent. +# +# Proof chaining +# -------------- +# +# When applying a schema, we are not required to provide values for the +# premises of the schema that are properties. An unsupplied premise +# becomes a *subgoal* which we must then prove. 
+# +# For example, consider the following axiom schema: + + relation man(X:t) + relation mortal(X:t) + + axiom [mortality_of_man] { + property [prem] man(X) + #--------------- + property mortal(X) + } + +# We take as a given that Socrates is a man, and prove he is mortal: + + individual socrates : t + + axiom [soc_man] man(socrates) + + property mortal(socrates) + proof + apply mortality_of_man + +# The axiom `mortality _of_man`, requires us supply the premise +# `man(socrates)`. Since this premise isn't present in our theorem, +# IVy sets it up as a proof subgoal. We didn't supply a proof of this +# subgoal, so IVy uses its default tactic `auto`, which in this case +# can easily prove that the axiom `man(socrates)` implies +# `man(socrates)`. +# +# If we wanted to prove this manually, however, we could continue our proof by +# applying the axiom `soc_man`, like this: + + property mortal(socrates) + proof + apply mortality_of_man; + apply soc_man + +# The prover maintains a list of proof goals, to be proved in order from +# first to last +# Applying a rule, if it succeeds, removes the first goal from the list, +# possibly replacing it with subgoals. At the end of the proof, the +# prover applies its default tactic `auto` to the remaining goals. +# +# In the above proof, we start with this goal: +# +# # property mortal(socrates) +# +# Applying axiom `mortality_of_man` we are left with the following subgoal: +# +# # property man(socrates) +# +# Applying axiom `soc_man` then leaves us with no subgoals. +# +# Notice that the proof works backward from conclusions to premises. +# +# A note on matching: There may be many ways to match a given proof +# goal to the conclusion of a rule. Different matches can result in +# different sub-goals, which may affect whether a proof succeeds. IVy +# doesn't attempt to verify that the match it finds is unique. 
For +# this reason, when it isn't obvious there there is a single match, it +# may be a good idea to give the match explicitly (though we didn't +# above, as in this case there is only one possible match). +# +# When chaining proof rules, it is helpful to be able to see the +# intermediate subgoals. This can be done with the `showgoals` tactic, +# like this: + + property mortal(socrates) + proof + apply mortality_of_man; + showgoals; + apply soc_man + +# When checking the proof, the `showgoals` tactic has the effect of +# printing the current list of proof goals, leaving the goals unchanged. +# A good way to develop a proof is to start with just `showgoals`, and to add tactics +# before this. Running the Ivy proof checker in an Emacs compilation buffer +# is a convenient way to do this. +# +# Theorems +# -------- +# +# Thus far, we have seen schemata used only as axioms. However, we can also +# prove the validity of a schema as a *theorem*. For example, here is a theorem +# expressing the transitivity of equality: + + theorem [trans] { + type t + property X:t = Y + property Y:t = Z + #-------------------------------- + property X:t = Z + } + +# We don't need a proof for this, since the `auto` tactic can handle +# it. The verification condition that ivy generates is `X = Y & Y = Z +# -> X = Z`. Here is a theorem that lets us elimiante universal +# quantifiers: + + theorem [elimA] { + type t + function p(X:t) : bool + property forall Y. p(Y) + #-------------------------------- + property p(X) + } + +# It says, for any predicate *p*, if `p(Y)` holds for all *Y*, then +# `p(X)` holds for a particular *X*. Again, the `auto` tactic can +# prove this easily. Now let's derive a consequence of these facts. A +# function *f* is *idempotent* if applying it twice gives the same +# result as applying it once. 
This theorem says that, if *f* is +# idempotent, then applying three times is the same as applying it +# once: + + theorem [idem2] { + type t + function f(X:t) : t + property forall X. f(f(X)) = f(X) + #-------------------------------- + property f(f(f(X))) = f(X) + } + +# The auto tactic can't prove this because the presmise contains a +# function cycle with a universally quantified variable. Here's the +# error message it gives: +# +# | error: The verification condition is not in logic epr. +# | +# | Note: the following functions form a cycle: +# | function f(V0:t) : t +# +# This means we'll need to apply some other tactic. Here is one possible proof: +# +# proof +# apply trans with Y = f(f(X)); +# apply elimA with X = f(X); +# apply elimA with X = X +# +# We think this theorem holds because `f(f(f(X)))` is equal to `f(f(X))` +# (by idempotence of *f*) which in turn is equal to `f(X)` (again, by +# idempotence of *f*). This means we want to apply the transitivy rule, where +# the intermediate term *Y* is `f(f(x))`. Notice we have to give the value of *Y* +# explicitly. Ivy isn't clever enough to guess this intermediate term for us. +# This gives us the following two proof subgoals: +# +# theorem [prop2] { +# type t +# function f(V0:t) : t +# property [prop5] forall X. f(f(X)) = f(X) +# #---------------------------------------- +# property f(f(f(X))) = f(f(X)) +# } +# +# +# theorem [prop3] { +# type t +# function f(V0:t) : t +# property [prop5] forall X. f(f(X)) = f(X) +# #---------------------------------------- +# property f(f(X)) = f(X) +# } +# +# Now we see the the conclusion in both cases is an instance of the +# idempotence assumption, for a particular value of *X*. This means +# we can apply the `elimA` rule that we proved above. In the first case, +# the value of `X` for which we are claiming idempotence is `f(X)`. +# With this assignment, IVy can match `p(X)` to `f(f(f(X))) = f(X)`. 
This substitution produces
+# - `step` gives the value for positive *X* in terms of *X* and the value for *X*-1 +# +# A definition schema such as this requires that the defined function +# symbol be fresh. With this schema, we can define a recursive function that +# adds the non-negative numbers less than or equal to *X* like this: +# +# function sum(X:t) : t +# +# definition [defsum] { +# sum(X:t) = 0 if X <= 0 else (X + sum(X-1)) +# } +# proof +# apply rec[t] +# +# Notice that we wrote the definition as a schema rather than a simple +# proposition. This is because it has a universally quantified variable +# `X` under arithmetic operators, which puts it outside the decidable +# fragment. Becuase this definitoin is a schema, when we want to use it, +# we'll have to apply it explicitly, +# +# In order to admit this definition, we applied the definition +# schema `rec[t]`. Ivy infers the following assignment: +# +# q=t, base(X) = 0, step(X,Y) = Y + X, fun(X) = sum(X) +# +# This allows the recursive definition to be admitted, providing that +# `sum` is fresh in the current context (i.e., we have not previously refered to +# `sum` except to declare its type). +# +# When we instantiate a theory, it generally comes with a recursion +# schema for the given type. For example, if we say: +# +# intepret t -> int +# +# then the above recursion schema `rec[t]` automatically becomes available. +# +# ### Extended matching +# +# Suppose we want to define a function that takes an additional +# parameter. For example, before summing, we want to divide all the +# numbers by *N*. 
We can define such a function like this: +# +# definition [defsumdiv] +# sumdiv(N,X) = 0 if X <= 0 else (X/N + sumdiv(N,X-1)) +# proof +# apply rec[t] +# +# In matching the recursion schema `rec[t]`, IVy will extend the +# function symbols in the premises of `rec[t]` with an extra parameter *N* so that +# schema becomes: +# +# axiom [ rec[t] ] = { +# type q +# function base(N:t,X:t) : q +# function step(N:t,X:q,Y:t) : q +# fresh function fun(N:t,X:t) : q +# #--------------------------------------------------------- +# definition fun(N:t,X:t) = base(N,X) if X <= 0 else step(N,fun(N,X-1),X) +# } +# +# The extended schema matches the definition, with the following assignment: +# +# q=t, base(N,X)=0, step(N,X,Y)=Y/N+X, fun(N,X) = sum2(N,X) +# +# This is somewhat as if functions were "curried", in which case the +# free symbol `fun` would match the curried function term `sumdiv N`. +# Since Ivy's logic doesn't allow for partial application of functions, +# extended matching provides an alternative. Notice that, +# to match the recursion schema, a function definition must be +# recursive in its *last* parameter. +# +# Induction +# --------- +# +# The `auto` tactic can't generally prove properties by induction. For +# that IVy needs manual help. To prove a property by induction, we define +# an invariant and prove it by instantiating an induction schema. Here is +# an example of such a schema, that works for the non-negative integers: +# +# axiom ind[t] = { +# relation p(X:t) +# property X <= 0 -> p(X) +# property p(X) -> p(X+1) +# #-------------------------- +# property p(X) +# } +# +# Like the recursion schema `rec[t]`, the induction schema `ind[t]` is +# part of the integer theory, and becomes available when we interpret +# type `t` as `int`. 
+# +# Suppose we want to prove that `sum(Y)` is always greater than or equal +# to *Y*, that is: +# +# property sum(Y) >= Y +# +# We can prove this by applying our induction schema: +# +# proof +# apply ind[t]; +# assume sum with X = Y; +# defergoal; +# assume sum with X = Y + 1; +# +# +# Applying `ind[t]` produces two sub-goals, a base case and an induction step: +# +# property X <= 0 -> sum(X) <= X +# +# property sum(X) <= X -> sum(X+1) <= X + 1 +# +# The `auto` tactic can't prove these goals because the definition of +# `sum` is a schema that must explicitly instantiated (if we try it, +# we'll get counterexamples). Fortunately, it suffices to instantiate +# this schema just for the specific arguments of `sum` in our +# subgoals. For the base case, we need to instantiate the definition for +# `X`, while for the induction step, we need `X+1`. Notice that we +# referred to the definiton of `sum` by the name `sum`. Alternatively, +# we can name the definition itself and refer to it by this name. +# +# After instantiating the definition of `sum`, our two subgoals look like this: +# +# ind1.ivy: line 21: Proof goals: +# +# theorem [prop5] { +# property [def2] sum(Y + 1) = (0 if Y + 1 <= 0 else Y + 1 + sum(Y + 1 - 1)) +# property sum(Y) >= Y -> sum(Y + 1) >= Y + 1 +# } +# +# +# theorem [prop4] { +# property [def2] sum(Y) = (0 if Y <= 0 else Y + sum(Y - 1)) +# property Y:t <= 0 -> sum(Y) >= Y +# } +# +# +# Because these goals are quantifier-free the `auto` tactic can easily +# handle them, so our proof is complete. +# +# +# Naming +# ------ +# +# If we can prove that something exists, we can give it a name. For +# example, suppose that we can prove that, for every *X*, there exists a +# *Y* such that `succ(X,Y)`. The there exists a function that, given an +# *X*, produces such a *Y*. We can define such a function called `next` +# in the following way: +# +# property exists Y. 
succ(X,Y) named next(X) +# +# Provided we can prove the property, and that `next` is fresh, we can +# infer that, for all *X*, `succ(X,next(X))` holds. Defining a function +# in this way, (that is, as a Skolem function) can be quite useful in +# constructibg a proof. However, since proofs in Ivy are not generally +# constructive, we have no way to *compute* the function `next`, so we +# can't use it in extracted code. +# +# Hierarchical proof development +# ============================== +# +# As the proof context gets larger, it becomes increasingly difficult +# for the automated prover to handle all of the judgements we have +# admitted. This is especially true as combining facts or theories may +# take us outside the automated prover's decidable fragment. For this +# reason, we need some way to break the proof into manageable parts. +# For this purpose, IVy provides a mechanism to structure the proof into +# a collection of localized proofs called *isolates*. +# +# Isolates +# -------- +# +# An isolate is a restricted proof context. An isolate can make parts of +# its proof context available to other isolates and keep other parts +# hidden. Moreover, isolates can contain isolates, allowing us to +# structure a proof development hierarchically. +# +# Suppose, for example, that we want to define a function *f*, but keep +# the exact definition hidden, exposing only certain properties of the +# function. We could write something like this: +# +# type t +# function f(X:t) : t +# +# isolate t_theory = { +# +# interpret t -> int +# definition f(X) = X + 1 +# +# property [expanding] f(X) > X +# property [transitivity] X:t < Y & Y < Z -> X < Z +# +# } +# +# Any names declared within the isolate belong to its namespace. For +# example, the names of the two properties above are +# `t_theory.expanding` and `t_theory.transitivity`. +# +# The isolate contains four statements. The first, says the type `t` is +# to be interpreted as the integers. 
This instantiates the theory of the +# integers for type `t`, giving the usual meaning to operators like `+` +# and `<`. The second defines *f* to be the integer successor function. +# +# The remaining two statements are properties about *t* and *f* to +# be proved. These properties are proved using only the context of the +# isolate, without any judgments admitted outside. +# +# Now suppose we want to prove an extra property using `t_theory`: +# +# isolate extra = { +# +# property [prop] f(f(X)) > X +# +# } +# with t_theory.expanding, t_theory.transitivity +# +# The 'with' clause says that the properties `expanding` and +# `transitivy` from isolate `t_theory` should be included in the context when +# proving `extra.prop`. The interpretation of *t* as the integers +# and the definiton of *f* as the successor function are ignored. +# +# Exporting properties +# -------------------- +# +# As a more convenient alternative, we can tell IVy which facts in +# `t_theory` are to be made available to other isolates and which are to +# be hidden. We place the hidden parts in a section called +# `implementation`: +# +# type t +# function f(X:t) : t +# +# isolate t_theory = { +# +# implementation { +# interpret t -> int +# definition f(X) = X + 1 +# } +# +# property [expanding] f(X) > X +# property [transitivity] X:t < Y & Y < Z -> X < Z +# +# } +# +# Now, we can use just the visible parts of `t_theory` like this: +# +# isolate extra = { +# +# property [prop] f(f(X)) > X +# +# } +# with t_theory +# +# Hiding a theory inside an isolate in this way is a very useful +# technique for keeping verification conditions decidable. Although +# definitions in the implementation section of an isolate are hidden +# from the `auto` tactic while proving other isolates, they can still be +# used when extracting executable code. Also, other isolates can still +# use the implementation declarations by explicitly referring to them. 
+# In particular, attaching the clause `with t` to any isolate will cause +# the `auto` tactic to use the integer theory for `t` in that isolate. +# +# Hierarchies of isolates +# ----------------------- +# +# An isolate can in turn contain isolates. This allows us to structure a +# proof hierarchically. For example, the above proof could have been +# structured like this: +# +# type t +# function f(X:t) : t +# +# isolate extra = { +# +# isolate t_theory = { +# +# implementation { +# interpret t -> int +# definition f(X) = X + 1 +# } +# +# property [expanding] f(X) > X +# property [transitivity] X:t < Y & Y < Z -> X < Z +# +# } +# +# property [prop] f(f(X)) > X +# +# } +# +# The parent isolate uses only the visible parts of the child isolate. +# +# Proof ordering and refinement +# ----------------------------- +# +# Thus far, proof developments have been presented in order. That is, +# judgements occur in the file in the order in which they are admitted +# to the proof context. +# +# In practice, this strict ordering can be inconvenient. For example, +# from the point of view of clear presentation, it may often be better +# to state a theorem, and *then* develop a sequence of auxiliary +# definitions and lemmas needed to prove it. Moreover, when developing +# an isolate, we would like to first state the visible judgments, then +# give the hidden implementation. +# +# To achieve this, we can use a *specification* section. The +# declarations in this section are admitted logically *after* the other +# declarations in the section. 
+# +# As an example, we can rewrite the above proof development so that the +# visible properties of the isolates occur textually at the beginning: +# +# +# type t +# function f(X:t) : t +# +# isolate extra = { +# +# specification { +# property [prop] f(f(X)) > X +# } +# +# isolate t_theory = { +# +# specification { +# property [expanding] f(X) > X +# property [transitivity] X:t < Y & Y < Z -> X < Z +# } +# +# implementation { +# interpret t -> int +# definition f(X) = X + 1 +# } +# } +# } +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# +# diff --git a/ivy/ivy_ast.py b/ivy/ivy_ast.py index 9204647..cfae292 100644 --- a/ivy/ivy_ast.py +++ b/ivy/ivy_ast.py @@ -642,7 +642,7 @@ class TacticWithMatch(AST): def match(self): return self.args[2:] def __str__(self): - res = self.tactic_name() + ' ' + str(args[0]) + res = self.tactic_name() + ' ' + str(self.args[0]) if len(self.args) > 1: res += ' ' + str(self.args[1]) if len(self.args) > 2: @@ -660,6 +660,8 @@ class AssumeTactic(TacticWithMatch): class ShowGoalsTactic(AST): def tactic_name(self): return 'showgoals' + def __str__(self): + return 'showgoals' class DeferGoalTactic(AST): def tactic_name(self): diff --git a/ivy/ivy_compiler.py b/ivy/ivy_compiler.py index 2534271..5c3a329 100644 --- a/ivy/ivy_compiler.py +++ b/ivy/ivy_compiler.py @@ -324,7 +324,10 @@ ivy_ast.NamedBinder.cmpl = lambda self: ivy_logic.NamedBinder( self.args[0].compile() ) -ivy_ast.LabeledFormula.cmpl = lambda self: self.clone([self.label,sortify_with_inference(self.formula)]) +ivy_ast.LabeledFormula.cmpl = lambda self: self.clone([self.label, + self.formula.compile() + if isinstance(self.formula,ivy_ast.SchemaBody) + else sortify_with_inference(self.formula)]) # compiling update patterns is complicated because they declare constants internally def UpdatePattern_cmpl(self): @@ -761,7 +764,11 @@ class IvyDomainSetup(IvyDeclInterp): def object(self,atom): self.domain.add_object(atom.rep) def axiom(self,ax): - 
self.domain.labeled_axioms.append(ax.compile()) + cax = ax.compile() + if isinstance(cax.formula,ivy_ast.SchemaBody): + self.domain.schemata[cax.label.relname] = cax + else: + self.domain.labeled_axioms.append(cax) def property(self,ax): lf = ax.compile() self.domain.labeled_props.append(lf) @@ -963,6 +970,7 @@ class IvyDomainSetup(IvyDeclInterp): if lhs in interp or lhs in self.domain.native_types : raise IvyError(thing,"{} is already interpreted".format(lhs)) self.domain.native_types[lhs] = thing.formula.args[1] + print self.domain.sig return rhs = thing.formula.args[1].rep self.domain.interps[lhs].append(thing) @@ -971,9 +979,11 @@ class IvyDomainSetup(IvyDeclInterp): if lhs in interp: if interp[lhs] != rhs: raise IvyError(thing,"{} is already interpreted".format(lhs)) + print self.domain.sig return if isinstance(rhs,ivy_ast.Range): interp[lhs] = ivy_logic.EnumeratedSort(lhs,["{}:{}".format(i,lhs) for i in range(int(rhs.lo),int(rhs.hi)+1)]) + print self.domain.sig return for x,y,z in zip([sig.sorts,sig.symbols], [slv.is_solver_sort,slv.is_solver_op], diff --git a/ivy/ivy_logic.py b/ivy/ivy_logic.py index f1b0ddd..272de58 100644 --- a/ivy/ivy_logic.py +++ b/ivy/ivy_logic.py @@ -915,17 +915,17 @@ class WithSymbols(object): global sig self.saved = [] for sym in self.symbols: - if sig.contains_symbol(sym): - self.saved.append(sym) - sig.remove_symbol(sym) - sig.add_symbol(sym.name,sym.sort) + if sym.name in sig.symbols: + self.saved.append((sym.name,sig.symbols[sym.name])) + del sig.symbols[sym.name] + sig.symbols[sym.name] = sym return self def __exit__(self,exc_type, exc_val, exc_tb): global sig for sym in self.symbols: - sig.remove_symbol(sym) - for sym in self.saved: - sig.add_symbol(sym.name,sym.sort) + del sig.symbols[sym.name] + for name,sym in self.saved: + sig.symbols[name] = sym return False # don't block any exceptions class WithSorts(object): diff --git a/ivy/ivy_parser.py b/ivy/ivy_parser.py index a9d7e82..2e0a3e4 100644 --- a/ivy/ivy_parser.py +++ 
b/ivy/ivy_parser.py @@ -335,7 +335,7 @@ else: p[0] = check_non_temporal(p[1]) lf = LabeledFormula(p[4],p[5]) lf.lineno = get_lineno(p,3) - lf = addlabel(p[4],'axiom') + lf = addlabel(lf,'axiom') d = AxiomDecl(addtemporal(lf) if p[2] else check_non_temporal(lf)) d.lineno = get_lineno(p,3) p[0].declare(d) diff --git a/ivy/ivy_proof.py b/ivy/ivy_proof.py index b420f93..8649fe2 100644 --- a/ivy/ivy_proof.py +++ b/ivy/ivy_proof.py @@ -512,7 +512,22 @@ def compile_match_list(proof,left_goal,right_goal): def compile_one_match(lhs,rhs,freesyms,constants): if il.is_variable(lhs): return fo_match(lhs,rhs,freesyms,constants) - return match(lhs,rhs,freesyms,constants) + rhsvs = dict((v.name,v) for v in lu.used_variables_ast(rhs)) + vmatches = [{v.sort:rhsvs[v.name].sort} for v in lu.used_variables_ast(lhs) + if v.name in rhsvs and v.sort in freesyms] + vmatch = merge_matches(*vmatches) + iu.dbg('vmatch') + if vmatch is None: + return None + lhs = apply_match_alt(vmatch,lhs) + iu.dbg('lhs') + freesyms = apply_match_freesyms(vmatch,freesyms) + iu.dbg('freesyms') + somatch = match(lhs,rhs,freesyms,constants) + iu.dbg('somatch') + fmatch = merge_matches(vmatch,somatch) + iu.dbg('fmatch') + return fmatch def compile_match(proof,prob,schema,decl): """ Compiles match in a proof. Only the symbols in @@ -521,6 +536,7 @@ def compile_match(proof,prob,schema,decl): matches = compile_match_list(proof,schema,decl) matches = [compile_one_match(m.lhs(),m.rhs(),prob.freesyms,prob.constants) for m in matches] res = merge_matches(*matches) + iu.dbg return res @@ -717,6 +733,9 @@ def heads_match(pat,inst,freesyms): if it has the same name and if it agrees on the non-free sorts in its type. 
""" + iu.dbg('pat') + iu.dbg('inst') + return (il.is_app(pat) and il.is_app(inst) and funcs_match(pat.rep,inst.rep,freesyms) and pat.rep not in freesyms or not il.is_app(pat) and not il.is_quantifier(pat) and type(pat) is type(inst) and len(pat.args) == len(inst.args)) diff --git a/ivy/ivy_theory.py b/ivy/ivy_theory.py index e7d2a53..2ffbad8 100644 --- a/ivy/ivy_theory.py +++ b/ivy/ivy_theory.py @@ -291,20 +291,20 @@ def get_assumes_and_asserts(preconds_only): for ldf in im.module.definitions: if not isinstance(ldf.formula,il.DefinitionSchema): if ldf.formula.defines() not in ilu.symbols_ast(ldf.formula.rhs()): -# print 'macro : {}'.format(ldf.formula) + print 'macro : {}'.format(ldf.formula) macros.append((ldf.formula.to_constraint(),ldf)) else: # can't treat recursive definition as macro -# print 'axiom : {}'.format(ldf.formula) + print 'axiom : {}'.format(ldf.formula) assumes.append((ldf.formula.to_constraint(),ldf)) for ldf in im.module.labeled_axioms: if not ldf.temporal: -# print 'axiom : {}'.format(ldf.formula) + print 'axiom : {}'.format(ldf.formula) assumes.append((ldf.formula,ldf)) for ldf in im.module.labeled_props: if not ldf.temporal: -# print 'prop : {}{} {}'.format(ldf.lineno,ldf.label,ldf.formula) + print 'prop : {}{} {}'.format(ldf.lineno,ldf.label,ldf.formula) asserts.append((ldf.formula,ldf)) for ldf in im.module.labeled_conjs: