diff --git a/thys/ML_Unification/Examples/E_Unification_Examples.thy b/thys/ML_Unification/Examples/E_Unification_Examples.thy --- a/thys/ML_Unification/Examples/E_Unification_Examples.thy +++ b/thys/ML_Unification/Examples/E_Unification_Examples.thy @@ -1,156 +1,163 @@ \<^marker>\creator "Kevin Kappelmann"\ section \E-Unification Examples\ theory E_Unification_Examples imports Main ML_Unification_HOL_Setup + Unify_Assumption_Tactic Unify_Fact_Tactic + Unify_Resolve_Tactics begin paragraph \Summary\ text \Sample applications of e-unifiers, methods, etc. introduced in this session.\ experiment begin subsection \Using The Simplifier For Unification.\ inductive_set even :: "nat set" where zero: "0 \ even" | step: "n \ even \ Suc (Suc n) \ even" text \Premises of the form @{term "SIMPS_TO_UNIF lhs rhs"} are solved by @{ML_structure Simplifier_Unification}. It first normalises @{term lhs} and then unifies the normalisation with @{term rhs}. See also @{theory ML_Unification.ML_Unification_HOL_Setup}.\ lemma [uhint where prio = Prio.LOW]: "n \ 0 \ PROP SIMPS_TO_UNIF (n - 1) m \ n \ Suc m" unfolding SIMPS_TO_UNIF_eq by linarith text \By default, below unification methods use @{ML Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify}, which is a combination of various practical unification algorithms.\ schematic_goal "(\x. x + 4 = n) \ Suc ?x = n" by uassm lemma "6 \ even" apply (urule step) apply (urule step) apply (urule step) apply (urule zero) done lemma "(220 + (80 - 2 * 2)) \ even" apply (urule step) apply (urule step)+ apply (urule zero) done lemma assumes "[a,b,c] = [c,b,a]" shows "[a] @ [b,c] = [c,b,a]" using assms by uassm lemma "x \ ({z, y, x} \ S) \ {x}" by (ufact TrueI) -lemma "(x + (y :: nat))^2 \ x^2 + 2*x*y + y^2 + 4 * y + x - y" +schematic_goal "(x + (y :: nat))^2 \ x^2 + 2*x*y + y^2 + 4 * y + x - y" supply power2_sum[simp] by (ufact TrueI) lemma assumes "\s. P (Suc (Suc 0)) (s(x := (1 :: nat), x := 1 + 1 * 4 - 3))" shows "P 2 (s(x := 2))" - by (ufact assms[of s]) + by (ufact assms) subsection \Providing Canonical Solutions With Unification Hints\ -lemma length_nil_eq_zero [uhint]: "length [] \ 0" by simp - -schematic_goal "length ?xs = 0" - by (ufact refl) - lemma sub_self_eq_zero [uhint]: "(n :: nat) - n \ 0" by simp schematic_goal "n - ?m = (0 :: nat)" by (ufact refl) -text \The following fails because, by default, @{ML Standard_Unification_Hints.try_hints} -uses the higher-order pattern unifier to unify hints against a given disagreement pair, and -@{term 0} cannot be higher-order pattern unified with @{term "length []"}. The unification of the -hint requires the use of yet another hint, namely @{term "length [] = 0"} (cf. above).\ +text \The following example shows a non-trivial interplay of the simplifier and unification hints: +Using just unification, the hint @{thm sub_self_eq_zero} is not applicable in the following example +since @{term 0} cannot be unified with @{term "length []"}. +However, the simplifier can rewrite @{term "length []"} to @{term 0} and the hint can then be applied.\ + +(*uncomment to see the trace*) +declare [[ML_map_context \Logger.set_log_levels Logger.root Logger.TRACE\]] schematic_goal "n - ?m = length []" - \ \by (ufact refl)\ - oops + by (ufact refl) -text \There are two ways to fix this: -\<^enum> We allow the recursive use of unification hints when unifying @{thm sub_self_eq_zero} and our goal. 
-\<^enum> We use a different unification hint that makes the recursive use of unification hints explicit.\ +text \There are also two ways to solve this using only unification hints: +\<^enum> We allow the recursive use of unification hints when unifying @{thm sub_self_eq_zero} and our goal +and register @{term "length [] = 0"} as an additional hint. +\<^enum> We use an alternative for @{thm sub_self_eq_zero} that makes the recursive use of unification +hints explicit and register @{term "length [] = 0"} as an additional hint.\ + +lemma length_nil_eq [uhint]: "length [] = 0" by simp text \Solution 1: we can use @{attribute rec_uhint} for recursive uses of hints. Warning: recursive hint applications easily loop.\ + schematic_goal "n - ?m = length []" + supply [[ucombine del = \(Standard_Unification_Combine.default_metadata \<^binding>\simp_unif\)\]] + (*doesn't work*) + \ \by (ufact refl)\ supply sub_self_eq_zero[rec_uhint] by (ufact refl) text \Solution 2: make the recursion explicit in the hint.\ lemma [uhint]: "k \ 0 \ (n :: nat) \ m \ n - m \ k" by simp schematic_goal "n - ?m = length []" + supply [[ucombine del = \(Standard_Unification_Combine.default_metadata \<^binding>\simp_unif\)\]] by (ufact refl) - subsection \Strengthen Unification With Unification Hints\ lemma assumes [uhint]: "n = m" shows "n - m = (0 :: nat)" by (ufact refl) lemma assumes "x = y" shows "y = x" supply eq_commute[uhint] by (ufact assms) paragraph \Unfolding definitions.\ definition "mysuc n = Suc n" lemma assumes "\m. Suc n > mysuc m" shows "mysuc n > Suc 3" supply mysuc_def[uhint] by (ufact assms) paragraph \Discharging meta implications with object-level implications\ lemma [uhint]: "Trueprop A \ A' \ Trueprop B \ B' \ Trueprop (A \ B) \ (PROP A' \ PROP B')" using atomize_imp[symmetric] by simp lemma assumes "A \ (B \ C) \ D" shows "A \ (B \ C) \ D" using assms by ufact subsection \Better Control Over Meta Variable Instantiations\ text \Consider the following type-inference problem.\ schematic_goal assumes app_typeI: "\f x. (\x. ArgT x \ DomT x (f x)) \ ArgT x \ DomT x (f x)" and f_type: "\x. ArgT x \ DomT x (f x)" and x_type: "ArgT x" shows "?T (f x)" apply (urule app_typeI) \\compare with the following application, creating an (unintuitive) higher-order instantiation\ (* apply (rule app_typeI) *) oops end end diff --git a/thys/ML_Unification/Examples/Unification_Hints_Reification_Examples.thy b/thys/ML_Unification/Examples/Unification_Hints_Reification_Examples.thy --- a/thys/ML_Unification/Examples/Unification_Hints_Reification_Examples.thy +++ b/thys/ML_Unification/Examples/Unification_Hints_Reification_Examples.thy @@ -1,213 +1,216 @@ \<^marker>\creator "Kevin Kappelmann"\ \<^marker>\contributor "Paul Bachmann"\ section \Examples: Reification Via Unification Hints\ theory Unification_Hints_Reification_Examples imports HOL.Rat ML_Unification_HOL_Setup Unify_Fact_Tactic + Unify_Resolve_Tactics begin paragraph \Summary\ text \Reification via unification hints. For an introduction to unification hints refer to \<^cite>\"unif-hints"\. We support a generalisation of unification hints as described in @{theory ML_Unification.ML_Unification_Hints}.\ subsection \Setup\ text \One-time setup to obtain a unifier with unification hints for the purpose of reification. 
We could also simply use the standard unification hints @{attribute uhint} and @{attribute rec_uhint}, but having separate instances is a cleaner approach.\ ML\ @{functor_instance struct_name = Reification_Unification_Hints and functor_name = Term_Index_Unification_Hints and id = \"reify"\ and more_args = \ structure TI = Discrimination_Tree val init_args = { concl_unifier = NONE, prems_unifier = NONE, normalisers = SOME Higher_Order_Pattern_Unification.norms_unify, retrieval = SOME (Term_Index_Unification_Hints_Args.mk_sym_retrieval TI.norm_term TI.unifiables), hint_preprocessor = SOME (Standard_Unification_Hints.get_hint_preprocessor (Context.the_generic_context ())) }\} val reify_unify = Unification_Combinator.add_fallback_unifier (fn unif_theory => - Higher_Order_Pattern_Unification.e_unify Unification_Util.unify_types unif_theory unif_theory) + Higher_Order_Pattern_Unification.e_unify Unification_Util.unify_types unif_theory unif_theory + |> Type_Unification.e_unify Unification_Util.unify_types) (Reification_Unification_Hints.try_hints |> Unification_Combinator.norm_unifier (#norm_term Higher_Order_Pattern_Unification.norms_unify)) \ local_setup \Reification_Unification_Hints.setup_attribute NONE\ text \Premises of hints should again be unified by the reification unifier.\ declare [[reify_uhint where prems_unifier = reify_unify]] subsection \Formulas with Quantifiers and Environment\ text \The following example is taken from HOL-Library.Reflection\_Examples. It is recommended to compare the approach presented here with the reflection tactic presented in said theory.\ datatype form = TrueF | FalseF | Less nat nat | And form form | Or form form | Neg form | ExQ form primrec interp :: "form \ ('a::ord) list \ bool" where "interp TrueF vs \ True" | "interp FalseF vs \ False" | "interp (Less i j) vs \ vs ! i < vs ! j" | "interp (And f1 f2) vs \ interp f1 vs \ interp f2 vs" | "interp (Or f1 f2) vs \ interp f1 vs \ interp f2 vs" | "interp (Neg f) vs \ \ interp f vs" | "interp (ExQ f) vs \ (\v. interp f (v # vs))" paragraph \Reification with unification and recursive hint unification for conclusion\ text \The following illustrates how to use the equations @{thm interp.simps} directly as unification hints for reification.\ experiment begin text \Hints for list lookup.\ declare List.nth_Cons_Suc[reify_uhint where prio = Prio.LOW] and List.nth_Cons_0[reify_uhint] text \Hints to reify formulas of type @{type bool} into formulas of type @{type form}.\ declare interp.simps[reify_uhint] text \We have to allow the hint unifier to recursively look for hints during unification of the hint's conclusion.\ declare [[reify_uhint where concl_unifier = reify_unify]] (*uncomment the following to see the hint unification process*) (* declare [[ML_map_context \Logger.set_log_levels Unification_Base.logger Logger.DEBUG\]] *) schematic_goal "interp ?f (?vs :: ('a :: ord) list) = (\(x :: 'a). x < y \ \(\(z :: 'a). v < z \ \False))" by (ufact refl where unifier = reify_unify) text \While this all works nicely if set up correctly, it can be rather difficult to understand and debug the recursive unification process for a hint's conclusion. 
In the next paragraph, we present an alternative that is closer to the examples presented in the original unification hints paper \<^cite>\"unif-hints"\.\ end paragraph \Reification with matching without recursion for conclusion\ text \We disallow the hint unifier to recursively look for hints while unifying the conclusion; instead, we only allow the hint unifier to match the hint's conclusion against the disagreement terms.\ -declare [[reify_uhint where concl_unifier = Higher_Order_Pattern_Unification.match]] +declare [[reify_uhint where concl_unifier = + \Higher_Order_Pattern_Unification.match |> Type_Unification.e_match Unification_Util.match_types\]] text \However, this also means that we now have to write our hints such that the hint's conclusion can successfully be matched against the disagreement terms. In particular, the disagreement terms may still contain meta variables that we want to instantiate with the help of the unification hints. Essentially, a hint then describes a canonical instantiation for these meta variables.\ experiment begin lemma [reify_uhint where prio = Prio.LOW]: "n \ Suc n' \ vs \ v # vs' \ vs' ! n' \ x \ vs ! n \ x" by simp lemma [reify_uhint]: "n \ 0 \ vs \ x # vs' \ vs ! n \ x" by simp lemma [reify_uhint]: "\e \ ExQ f; \v. interp f (v # vs) \ P v\ \ interp e vs \ \v. P v" "\e \ Less i j; x \ vs ! i; y \ vs ! j\ \ interp e vs \ x < y" "\e \ And f1 f2; interp f1 vs \ r1; interp f2 vs \ r2\ \ interp e vs \ r1 \ r2" "\e \ Or f1 f2; interp f1 vs \ r1; interp f2 vs \ r2\ \ interp e vs \ r1 \ r2" "e \ Neg f \ interp f vs \ r \ interp e vs \ \r" "e \ TrueF \ interp e vs \ True" "e \ FalseF \ interp e vs \ False" by simp_all schematic_goal "interp ?f (?vs :: ('a :: ord) list) = (\(x :: 'a). x < y \ \(\(z :: 'a). v < z \ \False))" by (urule refl where unifier = reify_unify) end text \The next examples are modification from \<^cite>\"unif-hints"\.\ subsection \Simple Arithmetic\ datatype add_expr = Var int | Add add_expr add_expr fun eval_add_expr :: "add_expr \ int" where "eval_add_expr (Var i) = i" | "eval_add_expr (Add ex1 ex2) = eval_add_expr ex1 + eval_add_expr ex2" lemma eval_add_expr_Var [reify_uhint where prio = Prio.LOW]: "e \ Var i \ eval_add_expr e \ i" by simp lemma eval_add_expr_add [reify_uhint]: "e \ Add e1 e2 \ eval_add_expr e1 \ m \ eval_add_expr e2 \ n \ eval_add_expr e \ m + n" by simp ML_command\ val t1 = Proof_Context.read_term_pattern @{context} "eval_add_expr ?e" val t2 = Proof_Context.read_term_pattern @{context} "1 + (2 + 7) :: int" val _ = Unification_Util.log_unif_results @{context} (t1, t2) (reify_unify []) \ schematic_goal "eval_add_expr ?e = (1 + (2 + 7) :: int)" by (urule refl where unifier = reify_unify) subsection \Arithmetic with Environment\ datatype mul_expr = Unit | Var nat | Mul mul_expr mul_expr | Inv mul_expr fun eval_mul_expr :: "mul_expr \ rat list \ rat" where "eval_mul_expr (Unit, \) = 1" | "eval_mul_expr (Var i, \) = \ ! 
i" | "eval_mul_expr (Mul e1 e2, \) = eval_mul_expr (e1, \) * eval_mul_expr (e2, \)" | "eval_mul_expr (Inv e, \) = inverse (eval_mul_expr (e, \))" text \Split @{term e} into an expression and an environment.\ lemma [reify_uhint where prio = Prio.VERY_LOW]: "e \ (e1, \) \ eval_mul_expr (e1, \) \ n \ eval_mul_expr e \ n" by simp text \Hints for environment lookup.\ lemma [reify_uhint where prio = Prio.LOW]: "e \ Var (Suc p) \ \ \ s # \ \ n \ eval_mul_expr (Var p, \) \ eval_mul_expr (e, \) \ n" by simp lemma [reify_uhint]: "e \ Var 0 \ \ \ n # \ \ eval_mul_expr (e, \) \ n" by simp lemma [reify_uhint]: "e1 \ Inv e2 \ n \ eval_mul_expr (e2, \) \ eval_mul_expr (e1, \) \ inverse n" "e \ Mul e1 e2 \ m \ eval_mul_expr (e1, \) \ n \ eval_mul_expr (e2, \) \ eval_mul_expr (e, \) \ m * n" "e \ Unit \ eval_mul_expr (e, \) \ 1" by simp_all ML_command\ val t1 = Proof_Context.read_term_pattern @{context} "eval_mul_expr ?e" val t2 = Proof_Context.read_term_pattern @{context} "1 * inverse 3 * 5 :: rat" val _ = Unification_Util.log_unif_results' 1 @{context} (t2, t1) (reify_unify []) \ schematic_goal "eval_mul_expr ?e = (1 * inverse 3 * 5 :: rat)" by (ufact refl where unifier = reify_unify) end diff --git a/thys/ML_Unification/Logger/ML_Logger_Examples.thy b/thys/ML_Unification/Logger/ML_Logger_Examples.thy --- a/thys/ML_Unification/Logger/ML_Logger_Examples.thy +++ b/thys/ML_Unification/Logger/ML_Logger_Examples.thy @@ -1,111 +1,111 @@ \<^marker>\creator "Kevin Kappelmann"\ subsection \Examples\ theory ML_Logger_Examples imports ML_Logger Setup_Result_Commands begin text \First some simple, barebone logging: print some information.\ ML_command\ \ \the following two are equivalent\ - val _ = Logger.log Logger.root_logger Logger.INFO @{context} (K "hello root logger") - val _ = @{log Logger.INFO Logger.root_logger} @{context} (K "hello root logger") + val _ = Logger.log Logger.root Logger.INFO @{context} (K "hello root logger") + val _ = @{log Logger.INFO Logger.root} @{context} (K "hello root logger") \ ML_command\ - val logger = Logger.root_logger + val logger = Logger.root val _ = @{log} @{context} (K "hello root logger") \ \\@{log}\ is equivalent to \Logger.log logger Logger.INFO\\ \ text \To guarantee the existence of a "logger" in an ML structure, one should use the \HAS_LOGGER\ signature.\ ML\ structure My_Struct : sig include HAS_LOGGER val get_n : Proof.context -> int end = struct - val logger = Logger.setup_new_logger Logger.root_logger "My_Struct" + val logger = Logger.setup_new_logger Logger.root "My_Struct" fun get_n ctxt = (@{log} ctxt (K "retrieving n..."); 42) end \ ML_command\val n = My_Struct.get_n @{context}\ text\We can set up a hierarchy of loggers\ ML\ - val logger = Logger.root_logger - val parent1 = Logger.setup_new_logger Logger.root_logger "Parent1" + val logger = Logger.root + val parent1 = Logger.setup_new_logger Logger.root "Parent1" val child1 = Logger.setup_new_logger parent1 "Child1" val child2 = Logger.setup_new_logger parent1 "Child2" - val parent2 = Logger.setup_new_logger Logger.root_logger "Parent2" + val parent2 = Logger.setup_new_logger Logger.root "Parent2" \ ML_command\ - (@{log Logger.INFO Logger.root_logger} @{context} (K "Hello root logger"); + (@{log Logger.INFO Logger.root} @{context} (K "Hello root logger"); @{log Logger.INFO parent1} @{context} (K "Hello parent1"); @{log Logger.INFO child1} @{context} (K "Hello child1"); @{log Logger.INFO child2} @{context} (K "Hello child2"); @{log Logger.INFO parent2} @{context} (K "Hello parent2")) \ text \We can use different 
log levels to show/suppress messages. The log levels are based on Apache's Log4J 2 \<^url>\https://logging.apache.org/log4j/2.x/manual/customloglevels.html\.\ ML_command\@{log Logger.DEBUG parent1} @{context} (K "Hello parent1")\ \\prints nothing\ declare [[ML_map_context \Logger.set_log_level parent1 Logger.DEBUG\]] ML_command\@{log Logger.DEBUG parent1} @{context} (K "Hello parent1")\ \\prints message\ ML_command\Logger.ALL\ \\ctrl+click on the value to see all log levels\ text \We can set options for all loggers below a given logger. Below, we set the log level for all loggers below (and including) \<^ML>\parent1\ to error, thus disabling warning messages.\ ML_command\ (@{log Logger.WARN parent1} @{context} (K "Warning from parent1"); @{log Logger.WARN child1} @{context} (K "Warning from child1")) \ declare [[ML_map_context \Logger.set_log_levels parent1 Logger.ERR\]] ML_command\ (@{log Logger.WARN parent1} @{context} (K "Warning from parent1"); @{log Logger.WARN child1} @{context} (K "Warning from child1")) \ declare [[ML_map_context \Logger.set_log_levels parent1 Logger.INFO\]] text \We can set message filters.\ -declare [[ML_map_context \Logger.set_msg_filters Logger.root_logger (match_string "Third")\]] +declare [[ML_map_context \Logger.set_msg_filters Logger.root (match_string "Third")\]] ML_command\ (@{log Logger.INFO parent1} @{context} (K "First message"); @{log Logger.INFO child1} @{context} (K "Second message"); @{log Logger.INFO child2} @{context} (K "Third message"); @{log Logger.INFO parent2} @{context} (K "Fourth message")) \ -declare [[ML_map_context \Logger.set_msg_filters Logger.root_logger (K true)\]] +declare [[ML_map_context \Logger.set_msg_filters Logger.root (K true)\]] text \One can also use different output channels (e.g. files) and hide/show some additional logging information. Ctrl+click on the values below and explore.\ ML_command\Logger.set_output; Logger.set_show_logger; Logging_Antiquotation.show_log_pos\ text \To set up (local) loggers outside ML environments, @{theory ML_Unification.Setup_Result_Commands} contains two commands, @{command setup_result} and @{command local_setup_result}.\ experiment begin -local_setup_result local_logger = \Logger.new_logger Logger.root_logger "Local"\ +local_setup_result local_logger = \Logger.new_logger Logger.root "Local"\ ML_command\@{log Logger.INFO local_logger} @{context} (K "Hello local world")\ end text \\local_logger\ is no longer available. The following thus does not work:\ \ \ML_command\@{log Logger.INFO local_logger} @{context} (K "Hello local world")\\ text \Let us create another logger in the global context.\ -setup_result some_logger = \Logger.new_logger Logger.root_logger "Some_Logger"\ +setup_result some_logger = \Logger.new_logger Logger.root "Some_Logger"\ ML_command\@{log Logger.INFO some_logger} @{context} (K "Hello world")\ text \Let us delete it again.\ declare [[ML_map_context \Logger.delete_logger some_logger\]] text \The logger can no longer be found in the logger hierarchy.\ ML_command\@{log Logger.INFO some_logger} @{context} (K "Hello world")\ end diff --git a/thys/ML_Unification/Logger/logger.ML b/thys/ML_Unification/Logger/logger.ML --- a/thys/ML_Unification/Logger/logger.ML +++ b/thys/ML_Unification/Logger/logger.ML @@ -1,225 +1,228 @@ (* Title: Logger/logger.ML Author: Kevin Kappelmann, Paul Bachmann Hierarchical logger, indexed on bindings. 
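A minimal usage sketch, mirroring ML_Logger_Examples.thy above (the component name "My_Component" and the messages are only illustrative): val my_logger = Logger.setup_new_logger Logger.root "My_Component" val _ = Logger.log my_logger Logger.INFO @{context} (K "hello from My_Component") (*equivalently, once a value "logger" is bound in scope, @{log} @{context} (K "hello from My_Component")*) Messages are passed as thunks (unit -> string), so they are only computed when the configured log level admits them. 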
The log levels are based on Apache's Log4J 2 https://logging.apache.org/log4j/2.x/manual/customloglevels.html *) signature LOGGER = sig type log_level = int val OFF : log_level val FATAL : log_level (*error log level*) val ERR : log_level val WARN : log_level val INFO : log_level val DEBUG : log_level val TRACE : log_level val ALL : log_level type logger_name = bstring type logger_binding - val root_logger : logger_binding + val root : logger_binding (*takes parent logger and creates a child logger with the given name*) val create_child_logger : logger_binding -> logger_name -> logger_binding val pretty_binding : logger_binding -> Pretty.T type logger_output = log_level -> string -> unit val default_output : logger_output type msg_filter = string -> bool type logger_config val config : logger_output -> log_level -> msg_filter -> bool -> logger_config val default_config : logger_config val set_config_output : logger_output -> logger_config -> logger_config val set_config_log_level : log_level -> logger_config -> logger_config (*set message filter: only log messages with positive results*) val set_config_msg_filter : msg_filter -> logger_config -> logger_config (*set whether to print the logger's name when logging*) val set_config_show_logger : bool -> logger_config -> logger_config val lookup_logger : logger_binding -> Context.generic -> logger_config option val insert_logger : (logger_binding * logger_config) -> Context.generic -> Context.generic val insert_logger_safe : (logger_binding * logger_config) -> Context.generic -> Context.generic val delete_logger : logger_binding -> Context.generic -> Context.generic val cut_loggers : logger_binding -> Context.generic -> Context.generic val set_logger : logger_binding -> (logger_config -> logger_config) -> Context.generic -> Context.generic val set_loggers : logger_binding -> (logger_config -> logger_config) -> Context.generic -> Context.generic val set_output : logger_binding -> logger_output -> Context.generic -> Context.generic val set_outputs : logger_binding -> logger_output -> Context.generic -> Context.generic val set_log_level : logger_binding -> log_level -> Context.generic -> Context.generic val set_log_levels : logger_binding -> log_level -> Context.generic -> Context.generic val set_msg_filter : logger_binding -> msg_filter -> Context.generic -> Context.generic val set_msg_filters : logger_binding -> msg_filter -> Context.generic -> Context.generic val set_show_logger : logger_binding -> bool -> Context.generic -> Context.generic val set_show_loggers : logger_binding -> bool -> Context.generic -> Context.generic (*creates and inserts child logger with default configuration into context*) val new_logger : logger_binding -> logger_name -> Context.generic -> (logger_binding * Context.generic) (*registers new logger to background context*) val setup_new_logger : logger_binding -> logger_name -> logger_binding (*prints message created by passed function to the logger's output if the logger's log_level is greater or equal than the passed level and the message filter answers positively; uses lazy computation of the message to avoid computations in case the log level blocks the logging.*) val log : logger_binding -> log_level -> Proof.context -> (unit -> string) -> unit (* logging functions for different log levels *) val fatal : logger_binding -> Proof.context -> (unit -> string) -> unit val err : logger_binding -> Proof.context -> (unit -> string) -> unit val warn : logger_binding -> Proof.context -> (unit -> string) -> unit val info : 
logger_binding -> Proof.context -> (unit -> string) -> unit val debug : logger_binding -> Proof.context -> (unit -> string) -> unit val trace : logger_binding -> Proof.context -> (unit -> string) -> unit end structure Logger : LOGGER = struct type log_level = int (*values for different log levels*) val OFF = 0 val FATAL = 100 val ERR = 200 val WARN = 300 val INFO = 400 val DEBUG = 500 val TRACE = 600 val ALL = 1000 type logger_name = bstring datatype logger_binding = Logger_Binding of binding fun binding_of (Logger_Binding binding) = binding -val root_logger_name = "Root" -val root_logger = Binding.name root_logger_name |> Logger_Binding +val root_name = "Root" +val root = Binding.name root_name |> Logger_Binding fun create_child_logger (Logger_Binding parent) child_name = let val child = Binding.qualify_name true parent child_name in if Symbol_Pos.is_identifier (Binding.name_of child) then Logger_Binding child else error ("Bad identifier for child logger " ^ ML_Syntax.print_string child_name) end val pretty_binding = Binding.pretty o binding_of type logger_output = log_level -> string -> unit fun default_output log_level = if log_level <= WARN then warning else if log_level < DEBUG then writeln else tracing type msg_filter = string -> bool type logger_config = { log_level : log_level, msg_filter : msg_filter, output : logger_output, show_logger : bool } fun config output log_level msg_filter show_logger = {log_level = log_level, output = output, msg_filter = msg_filter, show_logger = show_logger} val default_config = config default_output INFO (K true) true fun set_config_log_level log_level {output, show_logger, msg_filter,...} = {log_level = log_level, output = output, msg_filter = msg_filter, show_logger = show_logger} fun set_config_output output {log_level, show_logger, msg_filter,...} = {log_level = log_level, output = output, msg_filter = msg_filter, show_logger = show_logger} fun set_config_msg_filter msg_filter {output, log_level, show_logger,...} = {log_level = log_level, output = output, msg_filter = msg_filter, show_logger = show_logger} fun set_config_show_logger show_logger {output, log_level, msg_filter,...} = {log_level = log_level, output = output, msg_filter = msg_filter, show_logger = show_logger} fun log_config binding {log_level, output, msg_filter, show_logger} level message_f = if level > log_level then () else let val msg = message_f () - |> show_logger ? (fn msg => cat_lines [ - "Logger: " ^ Pretty.string_of (pretty_binding binding), - msg]) + |> show_logger ? (fn msg => Pretty.fbreaks [ + Pretty.block [Pretty.str "Logger: ", pretty_binding binding], + Pretty.str msg + ] |> Pretty.block |> Pretty.string_of) in if msg_filter msg then output level msg else () end structure BT = Binding_Tree -val init_tree = BT.insert (binding_of root_logger, default_config) BT.empty +val init_tree = BT.insert (binding_of root, default_config) BT.empty structure Logger_Data = Generic_Data( type T = logger_config BT.binding_tree val empty = init_tree val merge = BT.merge ) fun lookup_logger (Logger_Binding binding) = Logger_Data.get #> (fn bt => BT.lookup bt binding) fun insert_logger bcp context = (Logger_Data.map (apfst binding_of bcp |> BT.insert) context) handle (exn as BT.INSERT) => (warning (Pretty.block [ Pretty.str "Logger ", pretty_binding (fst bcp), Pretty.str " already added." 
] |> Pretty.string_of); Exn.reraise exn) fun insert_logger_safe bcp = Logger_Data.map (apfst binding_of bcp |> BT.insert_safe) fun delete_logger (Logger_Binding binding) = Logger_Data.map (BT.delete_safe binding) fun cut_loggers (Logger_Binding binding) = Logger_Data.map (BT.cut_safe binding) fun set_logger (Logger_Binding binding) f = Logger_Data.map (BT.map binding (Option.map f)) fun set_loggers (Logger_Binding binding) f = Logger_Data.map (BT.map_below binding (Option.map f)) fun set_output binding = set_logger binding o set_config_output fun set_outputs binding = set_loggers binding o set_config_output fun set_log_level binding = set_logger binding o set_config_log_level fun set_log_levels binding = set_loggers binding o set_config_log_level fun set_msg_filter binding = set_logger binding o set_config_msg_filter fun set_msg_filters binding = set_loggers binding o set_config_msg_filter fun set_show_logger binding = set_logger binding o set_config_show_logger fun set_show_loggers binding = set_loggers binding o set_config_show_logger fun new_logger parent child_name context = let val child = create_child_logger parent child_name in (child, insert_logger (child, default_config) context) end fun setup_new_logger parent child_name = Context.>>> (new_logger parent child_name) fun log binding log_level ctxt message_f = case lookup_logger binding (Context.Proof ctxt) of SOME config => log_config binding config log_level message_f | NONE => - let fun warn_msg _ = - "Logger " ^ Pretty.string_of (pretty_binding binding) ^ " not found." + let fun warn_msg _ = Pretty.fbreaks [ + Pretty.block [Pretty.str "Logger ", pretty_binding binding, Pretty.str " not found."], + Pretty.block (map Pretty.str ["Passed message: ", message_f ()]) + ] |> Pretty.block |> Pretty.string_of in - if binding = root_logger + if binding = root then default_output WARN (warn_msg ()) - else log root_logger WARN ctxt warn_msg + else log root WARN ctxt warn_msg end fun fatal binding = log binding FATAL fun err binding = log binding ERR fun warn binding = log binding WARN fun info binding = log binding INFO fun debug binding = log binding DEBUG fun trace binding = log binding TRACE end (*structures that use a logger should implement this signature*) signature HAS_LOGGER = sig val logger : Logger.logger_binding end diff --git a/thys/ML_Unification/ML_Unification_HOL_Setup.thy b/thys/ML_Unification/ML_Unification_HOL_Setup.thy --- a/thys/ML_Unification/ML_Unification_HOL_Setup.thy +++ b/thys/ML_Unification/ML_Unification_HOL_Setup.thy @@ -1,36 +1,34 @@ \<^marker>\creator "Kevin Kappelmann"\ section \Setup for HOL\ theory ML_Unification_HOL_Setup imports HOL.HOL ML_Unification_Hints begin lemma eq_eq_True: "P \ (P \ Trueprop True)" by standard+ simp_all declare [[uhint where hint_preprocessor = \Unification_Hints_Base.obj_logic_hint_preprocessor @{thm atomize_eq[symmetric]} (Conv.rewr_conv @{thm eq_eq_True})\]] and [[rec_uhint where hint_preprocessor = \Unification_Hints_Base.obj_logic_hint_preprocessor @{thm atomize_eq[symmetric]} (Conv.rewr_conv @{thm eq_eq_True})\]] lemma eq_TrueI: "PROP P \ PROP P \ Trueprop True" by (standard) simp declare [[ucombine add = \Standard_Unification_Combine.eunif_data (Simplifier_Unification.SIMPS_TO_unify @{thm eq_TrueI} |> Unification_Combinator.norm_closed_unifier (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) |> Unification_Combinator.unifier_from_closed_unifier |> K) - (Standard_Unification_Combine.default_metadata \<^binding>\SIMPS_TO_unif\)\]] + 
(Standard_Unification_Combine.metadata \<^binding>\SIMPS_TO_unif\ Prio.HIGH)\]] declare [[ucombine add = \Standard_Unification_Combine.eunif_data - (Simplifier_Unification.SIMPS_TO_UNIF_unify @{thm eq_TrueI} + (Simplifier_Unification.simp_unify_progress Envir.aeconv + (Simplifier_Unification.SIMPS_TO_UNIF_unify @{thm eq_TrueI}) + (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify - (Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify - |> Unification_Combinator.norm_unifier Envir_Normalisation.beta_norm_term_unif) - |> Unification_Combinator.norm_closed_unifier - (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) - |> Unification_Combinator.unifier_from_closed_unifier + Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify |> K) - (Standard_Unification_Combine.default_metadata \<^binding>\SIMPS_TO_UNIF_unif\)\]] + (Standard_Unification_Combine.metadata \<^binding>\SIMPS_TO_UNIF_unif\ Prio.HIGH)\]] end diff --git a/thys/ML_Unification/ML_Utils/Tactics/tactic_util.ML b/thys/ML_Unification/ML_Utils/Tactics/tactic_util.ML --- a/thys/ML_Unification/ML_Utils/Tactics/tactic_util.ML +++ b/thys/ML_Unification/ML_Utils/Tactics/tactic_util.ML @@ -1,260 +1,266 @@ (* Title: ML_Utils/tactic_util.ML Author: Kevin Kappelmann Tactic utilities. *) signature TACTIC_UTIL = sig include HAS_LOGGER (* tactic combinators*) val HEADGOAL : (int -> 'a) -> 'a + val THEN : ('a -> 'b Seq.seq) * ('b -> 'c Seq.seq) -> 'a -> 'c Seq.seq + val THEN' : ('a -> 'b -> 'c Seq.seq) * ('a -> 'c -> 'd Seq.seq) -> 'a -> 'b -> 'd Seq.seq + val TRY' : ('a -> tactic) -> 'a -> tactic val EVERY_ARG : ('a -> tactic) -> 'a list -> tactic val EVERY_ARG' : ('a -> 'b -> tactic) -> 'a list -> 'b -> tactic val CONCAT : tactic list -> tactic val CONCAT' : ('a -> tactic) list -> 'a -> tactic val FOCUS_PARAMS' : (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic val FOCUS_PARAMS_CTXT : (Proof.context -> tactic) -> Proof.context -> int -> tactic val FOCUS_PARAMS_CTXT' : (Proof.context -> int -> tactic) -> Proof.context -> int -> tactic val FOCUS_PARAMS_FIXED' : (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic val FOCUS_PREMS' : (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic val FOCUS' : (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic val SUBPROOF' : (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic val CSUBGOAL_DATA : (cterm -> 'a) -> ('a -> int -> tactic) -> int -> tactic val CSUBGOAL_STRIPPED : (cterm Binders.binders * (cterm list * cterm) -> 'a) -> ('a -> int -> tactic) -> int -> tactic val SUBGOAL_DATA : (term -> 'a) -> ('a -> int -> tactic) -> int -> tactic val SUBGOAL_STRIPPED : ((string * typ) list * (term list * term) -> 'a) -> ('a -> int -> tactic) -> int -> tactic (* tactics *) val insert_tac : thm list -> Proof.context -> int -> tactic (*thin_tac n i deletes nth premise of the ith subgoal*) val thin_tac : int -> int -> tactic val thin_tacs : int list -> int -> tactic val set_kernel_ho_unif_bounds : int -> int -> Proof.context -> Proof.context val silence_kernel_ho_bounds_exceeded : Proof.context -> Proof.context val safe_simp_tac : Proof.context -> int -> tactic (*nth_eq_assume_tac n i solves ith subgoal by assumption, without unification, preferring premise n*) val nth_eq_assume_tac : int -> int -> tactic (*resolution without lifting of premises and parameters*) val 
no_lift_biresolve_tac : bool -> thm -> int -> Proof.context -> int -> tactic val no_lift_resolve_tac : thm -> int -> Proof.context -> int -> tactic val no_lift_eresolve_tac : thm -> int -> Proof.context -> int -> tactic (*rewrite subgoal according to the given equality theorem "lhs \ subgoal"*) val rewrite_subgoal_tac : thm -> Proof.context -> int -> tactic (*creates equality theorem for a subgoal from an equality theorem for the subgoal's conclusion; fails if the equality theorem contains a tpair or implicit Assumption mentioning one of the bound variables*) val eq_subgoal_from_eq_concl : cterm Binders.binders -> cterm list -> thm -> Proof.context -> thm option (*rewrite a subgoal given an equality theorem and environment for the subgoal's conclusion*) val rewrite_subgoal_unif_concl : Envir_Normalisation.term_normaliser -> Envir_Normalisation.thm_normaliser -> Envir_Normalisation.thm_normaliser -> term Binders.binders -> (Envir.env * thm) -> Proof.context -> int -> tactic (*protect premises starting from (and excluding) the given index in the specified subgoal*) val protect_tac : int -> Proof.context -> int -> tactic (*unprotect the subgoal*) val unprotect_tac : Proof.context -> int -> tactic (*protect, then apply tactic, then unprotect*) val protected_tac : int -> (int -> tactic) -> Proof.context -> int -> tactic (*focus on the specified subgoal, introducing only the specified list of premises as theorems in the focus*) val focus_prems_tac : int list -> (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic (*focus_prems_tac and then deletes all focused premises*) val focus_delete_prems_tac : int list -> (Subgoal.focus -> int -> tactic) -> Proof.context -> int -> tactic (*apply tactic to the specified subgoal, where the state is given as a cterm*) val apply_tac : (int -> tactic) -> int -> cterm -> thm Seq.seq end structure Tactic_Util : TACTIC_UTIL = struct -val logger = Logger.setup_new_logger Logger.root_logger "Tactic_Util" +val logger = Logger.setup_new_logger Logger.root "Tactic_Util" (* tactic combinators *) fun HEADGOAL f = f 1 +fun (tac1 THEN tac2) = Seq.THEN (tac1, tac2) +fun (tac1 THEN' tac2) x = tac1 x THEN tac2 x + fun TRY' tac = tac ORELSE' K all_tac fun EVERY_ARG tac = EVERY o map tac fun EVERY_ARG' tac = EVERY' o map tac fun CONCAT tacs = fold_rev (curry op APPEND) tacs no_tac fun CONCAT' tacs = fold_rev (curry op APPEND') tacs (K no_tac) fun FOCUS_PARAMS' tac = Subgoal.FOCUS_PARAMS (HEADGOAL o tac) fun FOCUS_PARAMS_FIXED' tac = Subgoal.FOCUS_PARAMS_FIXED (HEADGOAL o tac) fun FOCUS_PREMS' tac = Subgoal.FOCUS_PREMS (HEADGOAL o tac) fun FOCUS' tac = Subgoal.FOCUS (HEADGOAL o tac) fun SUBPROOF' tac = Subgoal.SUBPROOF (HEADGOAL o tac) fun FOCUS_PARAMS_CTXT tac = Subgoal.FOCUS_PARAMS (#context #> tac) fun FOCUS_PARAMS_CTXT' tac = FOCUS_PARAMS' (#context #> tac) fun CSUBGOAL_DATA f tac = CSUBGOAL (uncurry tac o apfst f) fun CSUBGOAL_STRIPPED f = CSUBGOAL_DATA (f o Term_Util.strip_csubgoal) fun SUBGOAL_DATA f tac = SUBGOAL (uncurry tac o apfst f) fun SUBGOAL_STRIPPED f = SUBGOAL_DATA (f o Term_Util.strip_subgoal) (* tactics *) fun insert_tac thms ctxt = Method.insert_tac ctxt thms fun thin_tac n = if n < 1 then K no_tac else rotate_tac (n - 1) THEN' (DETERM o eresolve0_tac [thin_rl]) THEN' rotate_tac (~n + 1) val thin_tacs = sort int_ord #> map_index ((op -) o swap) #> EVERY_ARG' thin_tac fun set_kernel_ho_unif_bounds trace_bound search_bound = Config.put Unify.trace_bound trace_bound #> Config.put Unify.search_bound search_bound val silence_kernel_ho_bounds_exceeded 
= Context_Position.set_visible false (*some "safe" solvers create instantiations via flex-flex pairs, which we disallow here*) fun safe_simp_tac ctxt i state = let val eq_flexps = Thm.tpairs_of #> pair (Thm.tpairs_of state) #> eq_list (eq_pair (op aconv) (op aconv)) (*stop higher-order unifier from entering difficult unification problems*) val ctxt = set_kernel_ho_unif_bounds 1 1 ctxt |> silence_kernel_ho_bounds_exceeded in Simplifier.safe_simp_tac ctxt i state |> Seq.filter eq_flexps end fun nth_eq_assume_tac n = if n < 1 then K no_tac else rotate_tac (n - 1) THEN' eq_assume_tac (*resolution*) fun no_lift_biresolve_tac eres brule nsubgoals ctxt = Thm.bicompose (SOME ctxt) {flatten = true, match = false, incremented = true} (eres, brule, nsubgoals) #> PRIMSEQ val no_lift_resolve_tac = no_lift_biresolve_tac false val no_lift_eresolve_tac = no_lift_biresolve_tac true fun rewrite_subgoal_tac eq_thm = let val (lhs, rhs) = Thm.dest_equals (Thm.cconcl_of eq_thm) val eq_elim = Thm.instantiate' [] [SOME lhs, SOME rhs] Drule.equal_elim_rule1 val thm = Thm.implies_elim eq_elim eq_thm in no_lift_resolve_tac thm 1 end fun eq_subgoal_from_eq_concl cbinders cprems eq_thm ctxt = let fun all_abstract ((x, T), cfree) = let val all_const = Logic.all_const T |> Thm.cterm_of ctxt in Thm_Util.abstract_rule ctxt x cfree #> Option.map (Drule.arg_cong_rule all_const) end in (*introduce the premises*) SOME (fold_rev (Drule.imp_cong_rule o Thm.reflexive) cprems eq_thm) (*introduce abstractions for the fresh Frees*) |> fold (Option.mapPartial o all_abstract) cbinders end fun rewrite_subgoal_unif_concl norm_binders norm_state norm_eq_thm binders (env_eq_thm as (env, _)) ctxt i = let (*updates binders with their normalised type*) fun updated_binders env = Binders.norm_binders (norm_binders env) binders val binders = updated_binders env fun smash_tpairs_if_occurs (_, bvar) = Seq.maps (Unification_Util.smash_tpairs_if_occurs ctxt bvar) - fun rewrite_tac env eq_thm (rparams, (prems, _)) = + fun rewrite_tac env eq_thm prems = let val binders = updated_binders env val cterm_of = Thm.cterm_of ctxt val cprems = map (cterm_of o Binders.replace_binders binders) prems in eq_subgoal_from_eq_concl (map (apsnd cterm_of) binders) cprems (norm_eq_thm ctxt env eq_thm) ctxt |> Option.map (fn eq_thm => rewrite_subgoal_tac eq_thm ctxt) |> the_default (K no_tac) end in (*smash tpairs in case some binder occurs*) fold smash_tpairs_if_occurs binders (Seq.single env_eq_thm) |> Seq.lifts (fn (env, eq_thm) => PRIMITIVE (norm_state ctxt env) - THEN SUBGOAL_STRIPPED I (rewrite_tac env eq_thm) i) + THEN SUBGOAL_STRIPPED (fst o snd) (rewrite_tac env eq_thm) i) end fun protect_tac n ctxt = let fun protect cbinders (cprems, cconcl) i = let val nprems = length cprems in if nprems < n then (@{log Logger.WARN} ctxt (fn _ => Pretty.block [ Pretty.str "Not enough premises to protect. Given number: ", Pretty.str (string_of_int n), Pretty.str ". 
But there are only ", Pretty.str (string_of_int nprems), Pretty.str " premise(s) in subgoal ", Pretty.str (string_of_int i), Pretty.str ": ", Logic.close_prop (map (apfst fst o apsnd Thm.term_of) cbinders) (map Thm.term_of cprems) (Thm.term_of cconcl) |> Syntax.pretty_term ctxt ] |> Pretty.string_of); no_tac) else let val (cprems_unprotected, cconcl_protected) = chop n cprems ||> Drule.list_implies o rpair cconcl in @{thm Pure.prop_def} |> Thm.instantiate' [] [SOME cconcl_protected] |> (fn eq_thm => eq_subgoal_from_eq_concl cbinders cprems_unprotected eq_thm ctxt) |> Option.map (fn protected_eq_thm => rewrite_subgoal_tac protected_eq_thm ctxt i) |> the_default no_tac end end in if n < 0 then K no_tac else CSUBGOAL_STRIPPED I (uncurry protect) end fun unprotect_tac ctxt = match_tac ctxt [Drule.protectI] fun protected_tac n tac ctxt = protect_tac n ctxt THEN' tac THEN_ALL_NEW (unprotect_tac ctxt) fun focus_prems_tac is tac ctxt = CONVERSION (Conversion_Util.move_prems_to_front_conv is) THEN' protect_tac (length is) ctxt THEN' FOCUS_PREMS' (fn {context=ctxt, params=params, prems=prems, asms=asms, concl=concl, schematics=schematics} => let val concl = Term_Util.unprotect concl in unprotect_tac ctxt THEN' tac {context=ctxt, params=params, prems=prems, asms=asms, concl=concl, schematics=schematics} end ) ctxt fun focus_delete_prems_tac is tac ctxt = focus_prems_tac is tac ctxt THEN' thin_tacs (1 upto length is) fun apply_tac tac i = Goal.init #> tac i #> Seq.map Goal.conclude end diff --git a/thys/ML_Unification/ML_Utils/Theorems/thm_util.ML b/thys/ML_Unification/ML_Utils/Theorems/thm_util.ML --- a/thys/ML_Unification/ML_Utils/Theorems/thm_util.ML +++ b/thys/ML_Unification/ML_Utils/Theorems/thm_util.ML @@ -1,47 +1,47 @@ (* Title: ML_Utils/thm_util.ML Author: Kevin Kappelmann Theorem utilities. 
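Usage sketches (illustrative only; prem and thm stand for theorems where the first premise of thm matches prem): Thm_Util.match_implies_elim prem thm (*discharges the first premise of thm by matching it against prem, without lifting*) Thm_Util.protect thm (*wraps the conclusion of thm in Pure's goal protection via Drule.protectI*) 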
*) signature THM_UTIL = sig include HAS_LOGGER val pretty_THM : Proof.context -> string * int * thm list -> Pretty.T (*fails if the equality theorem contains a tpair or implicit Assumption mentioning the bound variables*) val abstract_rule : Proof.context -> string -> cterm -> thm -> thm option (*"match_implies_elim prem thm" matches the first premise of thm against prem and then removes the premise; without lifting*) val match_implies_elim : thm -> thm -> thm val protect : thm -> thm end structure Thm_Util : THM_UTIL = struct -val logger = Logger.setup_new_logger Logger.root_logger "Thm_Util" +val logger = Logger.setup_new_logger Logger.root "Thm_Util" fun pretty_THM ctxt (msg, subgoal, thms) = Pretty.block [ Pretty.str msg, Pretty.fbrk, Pretty.str "Subgoal number ", Pretty.str (string_of_int subgoal), Pretty.fbrk, Pretty.str "Theorems ", Pretty.list "[" "]" (map (Thm.pretty_thm ctxt) thms) ] fun abstract_rule ctxt n ct thm = SOME (Thm.abstract_rule n ct thm) handle THM data => (@{log Logger.DEBUG} ctxt (fn _ => pretty_THM ctxt data |> Pretty.string_of); NONE) fun match_implies_elim prem thm = Thm.instantiate (Thm.first_order_match (Thm.cprem_of thm 1, Thm.cprop_of prem)) thm |> (fn thm => Thm.implies_elim thm prem) fun protect thm = Drule.protectI |> Thm.instantiate (TVars.empty, Vars.make [((("A", 0), propT), Thm.cprop_of thm)]) |> Thm.elim_implies thm end diff --git a/thys/ML_Unification/Normalisations/envir_normalisation.ML b/thys/ML_Unification/Normalisations/envir_normalisation.ML --- a/thys/ML_Unification/Normalisations/envir_normalisation.ML +++ b/thys/ML_Unification/Normalisations/envir_normalisation.ML @@ -1,244 +1,244 @@ (* Title: ML_Unification/envir_normalisation.ML Author: Kevin Kappelmann Normalisations with respect to an environment. 
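For instance (sketch; env is an environment produced by a unifier or matcher, t a term): Envir_Normalisation.norm_term_unif env t (*recursively instantiates the term and type variables of t*) Envir_Normalisation.norm_term_match env t (*substitutes the variables of t once, as appropriate after matching*) Envir_Normalisation.beta_eta_short_norm_term_unif env t (*additionally beta-normalises and eta-contracts the result*) 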
Adapted and generalised from envir.ML *) signature ENVIR_NORMALISATION = sig (* types *) type type_normaliser = Type.tyenv -> typ -> typ val norm_type_match : type_normaliser val norm_type_unif : type_normaliser (* terms *) type term_type_normaliser = Type.tyenv -> Term_Normalisation.term_normaliser val norm_term_types : type_normaliser -> term_type_normaliser type term_normaliser = Envir.env -> Term_Normalisation.term_normaliser (** matching **) val norm_term_types_match : term_type_normaliser (*without beta-normalisation*) val norm_term_match : term_normaliser val eta_short_norm_term_match : term_normaliser (*with beta-normalisation*) val beta_norm_term_match : term_normaliser val beta_eta_short_norm_term_match : term_normaliser (** unification **) val norm_term_types_unif : term_type_normaliser (*without beta-normalisation*) val norm_term_unif : term_normaliser val eta_short_norm_term_unif : term_normaliser (*with beta-normalisation*) val beta_norm_term_unif : term_normaliser val beta_eta_short_norm_term_unif : term_normaliser (* theorems *) type thm_normaliser = Proof.context -> Envir.env -> thm -> thm type thm_type_normaliser = thm_normaliser - val norm_thm_types : type_normaliser -> thm_normaliser - val norm_thm_types_match : thm_normaliser - val norm_thm_types_unif : thm_normaliser + val norm_thm_types : type_normaliser -> thm_type_normaliser + val norm_thm_types_match : thm_type_normaliser + val norm_thm_types_unif : thm_type_normaliser val norm_thm : type_normaliser -> term_normaliser -> thm_normaliser val norm_thm_match : thm_normaliser val norm_thm_unif : thm_normaliser val eta_short_norm_thm : type_normaliser -> term_normaliser -> thm_normaliser val eta_short_norm_thm_match : thm_normaliser val eta_short_norm_thm_unif : thm_normaliser (*with beta-normalisation and eta-contraction*) val beta_eta_short_norm_thm : type_normaliser -> term_normaliser -> thm_normaliser val beta_eta_short_norm_thm_match : thm_normaliser val beta_eta_short_norm_thm_unif : thm_normaliser end structure Envir_Normalisation : ENVIR_NORMALISATION = struct (* types *) type type_normaliser = Type.tyenv -> typ -> typ val norm_type_match = Envir.subst_type val norm_type_unif = Envir.norm_type (* terms *) type term_type_normaliser = Type.tyenv -> Term_Normalisation.term_normaliser fun norm_term_types norm_type = map_types o norm_type type term_normaliser = Envir.env -> Term_Normalisation.term_normaliser (** matching **) val norm_term_types_match = norm_term_types norm_type_match fun norm_abs_same2 normT norm (a, T, body) = Abs (a, normT T, Same.commit norm body) handle Same.SAME => Abs (a, T, norm body) fun norm_abs_comb_same beta_norm norm (abs_args as (_, _, body)) arg = if beta_norm then Same.commit norm (subst_bound (arg, body)) else let val f = Abs abs_args in (norm f $ Same.commit norm arg handle Same.SAME => f $ norm arg) end fun norm_comb_same beta_norm norm f t = (case norm f of (nf as Abs (_, _, body)) => if beta_norm then Same.commit norm (subst_bound (t, body)) else nf $ Same.commit norm t | nf => nf $ Same.commit norm t) handle Same.SAME => f $ norm t fun norm_term_match1 beta_norm tenv : term Same.operation = let fun norm (Var v) = (case Envir.lookup1 tenv v of SOME u => u | NONE => raise Same.SAME) | norm (Abs (a, T, body)) = Abs (a, T, norm body) | norm (Abs abs_args $ t) = norm_abs_comb_same beta_norm norm abs_args t | norm (f $ t) = norm_comb_same beta_norm norm f t | norm _ = raise Same.SAME in norm end fun norm_term_match2 beta_norm (Envir.Envir {tenv, tyenv, ...}) : term Same.operation = let 
val normT = Envir.subst_type_same tyenv fun norm (Const (a, T)) = Const (a, normT T) | norm (Free (a, T)) = Free (a, normT T) | norm (v as Var (xi, T)) = let fun lookup v = (case Envir.lookup1 tenv (dest_Var v) of SOME u => u | NONE => raise Same.SAME) in (normT T |> (fn T => Same.commit lookup (Var (xi, T)))) handle Same.SAME => lookup v end | norm (Abs args) = norm_abs_same2 normT norm args | norm (Abs abs_args $ t) = norm_abs_comb_same beta_norm norm abs_args t | norm (f $ t) = norm_comb_same beta_norm norm f t | norm _ = raise Same.SAME in norm end fun norm_term_match_same beta_norm (envir as Envir.Envir {tenv, tyenv, ...}) = if Vartab.is_empty tyenv then norm_term_match1 beta_norm tenv else norm_term_match2 beta_norm envir fun norm_term_match env = Same.commit (norm_term_match_same false env) fun beta_norm_term_match env = Same.commit (norm_term_match_same true env) fun eta_short_norm_term_match env = norm_term_match env #> Term_Normalisation.eta_short fun beta_eta_short_norm_term_match env = beta_norm_term_match env #> Term_Normalisation.eta_short (** unification **) val norm_term_types_unif = norm_term_types norm_type_unif fun norm_type_unif_same tyenv : typ Same.operation = let fun norm (Type (a, Ts)) = Type (a, Same.map norm Ts) | norm (TFree _) = raise Same.SAME | norm (TVar v) = (case Type.lookup tyenv v of SOME U => Same.commit norm U | NONE => raise Same.SAME) in norm end fun norm_term_unif1 beta_norm tenv : term Same.operation = let fun norm (Var v) = (case Envir.lookup1 tenv v of SOME u => Same.commit norm u | NONE => raise Same.SAME) | norm (Abs (a, T, body)) = Abs (a, T, norm body) | norm (Abs abs_args $ t) = norm_abs_comb_same beta_norm norm abs_args t | norm (f $ t) = norm_comb_same beta_norm norm f t | norm _ = raise Same.SAME in norm end fun norm_term_unif2 beta_norm (envir as Envir.Envir {tyenv, ...}) : term Same.operation = let val normT = norm_type_unif_same tyenv fun norm (Const (a, T)) = Const (a, normT T) | norm (Free (a, T)) = Free (a, normT T) | norm (Var (xi, T)) = (case Envir.lookup envir (xi, T) of SOME u => Same.commit norm u | NONE => Var (xi, normT T)) | norm (Abs args) = norm_abs_same2 normT norm args | norm (Abs abs_args $ t) = norm_abs_comb_same beta_norm norm abs_args t | norm (f $ t) = norm_comb_same beta_norm norm f t | norm _ = raise Same.SAME in norm end fun norm_term_unif_same beta_norm (envir as Envir.Envir {tenv, tyenv, ...}) = if Vartab.is_empty tyenv then norm_term_unif1 beta_norm tenv else norm_term_unif2 beta_norm envir fun norm_term_unif env = Same.commit (norm_term_unif_same false env) fun beta_norm_term_unif env = Same.commit (norm_term_unif_same true env) fun eta_short_norm_term_unif env = norm_term_unif env #> Term_Normalisation.eta_short fun beta_eta_short_norm_term_unif env = beta_norm_term_unif env #> Term_Normalisation.eta_short type thm_normaliser = Proof.context -> Envir.env -> thm -> thm type thm_type_normaliser = thm_normaliser (** theorems **) (*collect and normalise TVars of a term*) fun collect_norm_types norm_type ctxt tyenv t = let val norm_type' = norm_type tyenv fun prep_type_entry x = (x, Thm.ctyp_of ctxt (norm_type' (TVar x))) in fold_types (fold_atyps (fn TVar v => TVars.add (prep_type_entry v) | _ => I)) t |> TVars.build end (*collect and normalise Vars of a term*) fun collect_norm_terms norm_type norm_term ctxt (env as Envir.Envir {tyenv,...}) t = let val norm_type' = norm_type tyenv val norm_term' = norm_term env fun prep_term_entry (x as (n, T)) = ((n, norm_type' T), Thm.cterm_of ctxt (norm_term' (Var x))) in 
fold_aterms (fn Var v => Vars.add (prep_term_entry v) | _ => I) t |> Vars.build end (*normalise types of a theorem*) fun norm_thm_types norm_types ctxt (Envir.Envir {tyenv, ...}) thm = let val prop = Thm.full_prop_of thm val type_inst = collect_norm_types norm_types ctxt tyenv prop val inst = (type_inst, Vars.empty) in Thm.instantiate inst thm end val norm_thm_types_match = norm_thm_types norm_type_match val norm_thm_types_unif = norm_thm_types norm_type_unif (*normalise a theorem*) fun norm_thm norm_types norm_terms ctxt (env as Envir.Envir {tyenv,...}) thm = let val prop = Thm.full_prop_of thm val type_inst = collect_norm_types norm_types ctxt tyenv prop val term_inst = collect_norm_terms norm_types norm_terms ctxt env prop val inst = (type_inst, term_inst) in Thm.instantiate inst thm end val norm_thm_match = norm_thm norm_type_match norm_term_match val norm_thm_unif = norm_thm norm_type_unif norm_term_unif fun eta_short_norm_thm norm_types norm_terms ctxt env = norm_thm norm_types norm_terms ctxt env #> Conv.fconv_rule Thm.eta_conversion val eta_short_norm_thm_match = eta_short_norm_thm norm_type_match norm_term_match val eta_short_norm_thm_unif = eta_short_norm_thm norm_type_unif norm_term_unif fun beta_eta_short_norm_thm norm_types norm_terms ctxt env = norm_thm norm_types norm_terms ctxt env #> Conv.fconv_rule Drule.beta_eta_conversion val beta_eta_short_norm_thm_match = beta_eta_short_norm_thm norm_type_match norm_term_match val beta_eta_short_norm_thm_unif = beta_eta_short_norm_thm norm_type_unif norm_term_unif end diff --git a/thys/ML_Unification/Simps_To/Simps_To.thy b/thys/ML_Unification/Simps_To/Simps_To.thy --- a/thys/ML_Unification/Simps_To/Simps_To.thy +++ b/thys/ML_Unification/Simps_To/Simps_To.thy @@ -1,72 +1,70 @@ \<^marker>\creator "Kevin Kappelmann"\ section \Simps To\ theory Simps_To imports ML_Tactic_Utils - ML_Theorem_Utils - ML_Unification_Base Setup_Result_Commands begin paragraph \Summary\ text \Simple frameworks to ask for the simp-normal form of a term on the user-level.\ -setup_result simps_to_base_logger = \Logger.new_logger Logger.root_logger "Simps_To_Base"\ +setup_result simps_to_base_logger = \Logger.new_logger Logger.root "Simps_To_Base"\ paragraph \Using Simplification On Left Term\ definition "SIMPS_TO s t \ (s \ t)" lemma SIMPS_TO_eq: "SIMPS_TO s t \ (s \ t)" unfolding SIMPS_TO_def by simp text \Prevent simplification of second/right argument\ lemma SIMPS_TO_cong [cong]: "s \ s' \ SIMPS_TO s t \ SIMPS_TO s' t" by simp lemma SIMPS_TOI: "PROP SIMPS_TO s s" unfolding SIMPS_TO_eq by simp lemma SIMPS_TOD: "PROP SIMPS_TO s t \ s \ t" unfolding SIMPS_TO_eq by simp ML_file\simps_to.ML\ paragraph \Using Simplification On Left Term Followed By Unification\ definition "SIMPS_TO_UNIF s t \ (s \ t)" text \Prevent simplification\ lemma SIMPS_TO_UNIF_cong [cong]: "SIMPS_TO_UNIF s t \ SIMPS_TO_UNIF s t" by simp lemma SIMPS_TO_UNIF_eq: "SIMPS_TO_UNIF s t \ (s \ t)" unfolding SIMPS_TO_UNIF_def by simp lemma SIMPS_TO_UNIFI: "PROP SIMPS_TO s s' \ s' \ t \ PROP SIMPS_TO_UNIF s t" unfolding SIMPS_TO_UNIF_eq SIMPS_TO_eq by simp lemma SIMPS_TO_UNIFD: "PROP SIMPS_TO_UNIF s t \ s \ t" unfolding SIMPS_TO_UNIF_eq by simp ML_file\simps_to_unif.ML\ paragraph \Examples\ experiment begin lemma assumes [simp]: "P \ Q" and [simp]: "Q \ R" shows "PROP SIMPS_TO P Q" apply simp \\Note: only the left-hand side is simplified.\ ML_command\ \\obtaining the normal form theorem for a term in ML\ Simps_To.SIMPS_TO_thm_resultsq (simp_tac @{context}) @{context} @{cterm P} |> Seq.list_of |> map 
@{print} \ oops schematic_goal assumes [simp]: "P \ Q" and [simp]: "Q \ R" shows "PROP SIMPS_TO P ?Q" - apply simp - by (rule SIMPS_TOI) + by (tactic \Simps_To.SIMPS_TO_tac (Simps_To.simp_inst_tac (simp_tac @{context})) @{context} 1\) + end end diff --git a/thys/ML_Unification/Simps_To/simps_to.ML b/thys/ML_Unification/Simps_To/simps_to.ML --- a/thys/ML_Unification/Simps_To/simps_to.ML +++ b/thys/ML_Unification/Simps_To/simps_to.ML @@ -1,63 +1,72 @@ (* Title: ML_Unification/simps_to.ML Author: Kevin Kappelmann Create SIMPS_TO theorems. *) signature SIMPS_TO = sig include HAS_LOGGER val dest_SIMPS_TO : term -> (term * term) val cdest_SIMPS_TO : cterm -> (cterm * cterm) - val mk_SIMPS_TO_cprop : cterm -> cterm -> cterm - val mk_SIMPS_TO_var_cprop : Proof.context -> cterm -> cterm + val mk_SIMPS_TO_cprop : cterm * cterm -> cterm val finish_SIMPS_TO_tac : Proof.context -> int -> tactic + val SIMPS_TO_tac : (int -> tactic) -> Proof.context -> int -> tactic - val SIMPS_TO_thmsq : (int -> tactic) -> Proof.context -> cterm -> thm Seq.seq - val SIMPS_TO_thm_resultsq : (int -> tactic) -> Proof.context -> cterm -> - (thm * cterm) Seq.seq + val SIMPS_TO_thmsq : (int -> tactic) -> Proof.context -> cterm * cterm -> thm Seq.seq + + val simp_inst_tac : (int -> tactic) -> int -> tactic + + val SIMPS_TO_thm_resultsq : (int -> tactic) -> Proof.context -> cterm -> (thm * cterm) Seq.seq end structure Simps_To : SIMPS_TO = struct val logger = Logger.setup_new_logger simps_to_base_logger "Simps_To" structure Util = Tactic_Util val dest_SIMPS_TO = \<^Const_fn>\SIMPS_TO _ for lhs rhs => \(lhs, rhs)\\ val cdest_SIMPS_TO = Thm.dest_comb #>> Thm.dest_arg -fun mk_SIMPS_TO_cprop clhs crhs = \<^instantiate>\'a = \Thm.ctyp_of_cterm clhs\ and clhs and crhs +fun mk_SIMPS_TO_cprop (clhs, crhs) = \<^instantiate>\'a = \Thm.ctyp_of_cterm clhs\ and clhs and crhs in cprop\PROP (SIMPS_TO clhs crhs)\ for clhs :: 'a\ -fun mk_SIMPS_TO_var_cprop ctxt ct = - Var (("x", Thm.maxidx_of_cterm ct + 1), Thm.typ_of_cterm ct) - |> Thm.cterm_of ctxt - |> mk_SIMPS_TO_cprop ct - fun finish_SIMPS_TO_tac ctxt = match_tac ctxt [@{thm SIMPS_TOI}] +fun SIMPS_TO_tac simp_tac ctxt = simp_tac THEN' finish_SIMPS_TO_tac ctxt -fun SIMPS_TO_thmsq simps_to_tac ctxt ct = - let fun inst_tac cconcl _ = - PRIMITIVE (cdest_SIMPS_TO cconcl |> swap |> Thm.match |> Thm.instantiate) +fun SIMPS_TO_thmsq simp_tac ctxt (ctp as (clhs, crhs)) = + (let val goal = mk_SIMPS_TO_cprop ctp in - (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + (@{log Logger.TRACE} ctxt (fn _ => Pretty.block [ Pretty.str "Creating ", Syntax.pretty_term ctxt @{term SIMPS_TO}, Pretty.str " theorems for ", - Syntax.pretty_term ctxt (Thm.term_of ct) + Unification_Util.pretty_terms ctxt (map Thm.term_of [clhs, crhs]) ] |> Pretty.string_of); - mk_SIMPS_TO_var_cprop ctxt ct - |> Util.HEADGOAL (Util.apply_tac ( - simps_to_tac - THEN' Tactic_Util.CSUBGOAL_STRIPPED (snd o snd) inst_tac - THEN' finish_SIMPS_TO_tac ctxt))) + Util.HEADGOAL (Util.apply_tac (SIMPS_TO_tac simp_tac ctxt)) goal) + end) + handle TYPE _ => (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + Pretty.str "Types of terms ", + Unification_Util.pretty_terms ctxt (map Thm.term_of [clhs, crhs]), + Pretty.str " not equal" + ] |> Pretty.string_of); + Seq.empty) + +fun simp_inst_tac simp_tac = + let fun inst_tac cconcl = + PRIMITIVE (cdest_SIMPS_TO cconcl |> swap |> Thm.first_order_match |> Thm.instantiate) + in simp_tac THEN' Tactic_Util.CSUBGOAL_STRIPPED (snd o snd) (K o inst_tac) end + +fun SIMPS_TO_thm_resultsq simp_tac ctxt ct = + let 
val cvar = Term_Util.fresh_var "x" (Thm.typ_of_cterm ct) (Thm.maxidx_of_cterm ct) + |> fst |> Thm.cterm_of ctxt + in + SIMPS_TO_thmsq (simp_inst_tac simp_tac) ctxt (ct, cvar) + |> Seq.map (fn thm => (thm, Thm.cconcl_of thm |> cdest_SIMPS_TO |> snd)) end -val SIMPS_TO_thm_resultsq = - Seq.map (fn thm => (thm, Thm.cconcl_of thm |> Thm.dest_arg)) ooo SIMPS_TO_thmsq - end \ No newline at end of file diff --git a/thys/ML_Unification/Simps_To/simps_to_unif.ML b/thys/ML_Unification/Simps_To/simps_to_unif.ML --- a/thys/ML_Unification/Simps_To/simps_to_unif.ML +++ b/thys/ML_Unification/Simps_To/simps_to_unif.ML @@ -1,71 +1,67 @@ (* Title: ML_Unification/simps_to_unif.ML Author: Kevin Kappelmann Create SIMPS_TO_UNIF theorems. *) signature SIMPS_TO_UNIF = sig include HAS_LOGGER val dest_SIMPS_TO_UNIF : term -> (term * term) val cdest_SIMPS_TO_UNIF : cterm -> (cterm * cterm) - val mk_SIMPS_TO_UNIF_cprop : cterm -> cterm -> cterm + val mk_SIMPS_TO_UNIF_cprop : cterm * cterm -> cterm - val mk_SIMPS_TO_UNIF_thm : thm -> thm -> thm + val SIMPS_TO_UNIF_tac : (int -> tactic) -> (int -> thm -> 'a Seq.seq) -> Proof.context -> int -> + thm -> 'a Seq.seq val SIMPS_TO_UNIF_env_thmsq : (int -> tactic) -> Unification_Base.normalisers -> - Unification_Base.unifier -> term Binders.binders -> Proof.context -> term * term -> Envir.env -> + Unification_Base.closed_unifier -> Proof.context -> term * term -> Envir.env -> (Envir.env * thm) Seq.seq end structure Simps_To_Unif : SIMPS_TO_UNIF = struct val logger = Logger.setup_new_logger simps_to_base_logger "Simps_To_Unif" val dest_SIMPS_TO_UNIF = \<^Const_fn>\SIMPS_TO_UNIF _ for lhs rhs => \(lhs, rhs)\\ val cdest_SIMPS_TO_UNIF = Thm.dest_comb #>> Thm.dest_arg -fun mk_SIMPS_TO_UNIF_cprop clhs crhs = \<^instantiate>\'a = \Thm.ctyp_of_cterm clhs\ and clhs and crhs - in cprop\PROP (SIMPS_TO_UNIF clhs crhs)\ for clhs :: 'a\ - -fun mk_SIMPS_TO_UNIF_thm SIMPS_TO_thm eq_thm = - Drule.incr_indexes2 SIMPS_TO_thm eq_thm @{thm SIMPS_TO_UNIFI} - |> Thm_Util.match_implies_elim SIMPS_TO_thm - |> Thm_Util.match_implies_elim eq_thm +fun mk_SIMPS_TO_UNIF_cprop (clhs, crhs) = \<^instantiate>\'a = \Thm.ctyp_of_cterm clhs\ + and clhs and crhs in cprop\PROP (SIMPS_TO_UNIF clhs crhs)\ for clhs :: 'a\ -fun SIMPS_TO_UNIF_env_thmsq simps_to_tac norms unif binders ctxt (lhs, rhs) env = - let - val SIMPS_TO_res = Binders.replace_binders binders lhs - |> Thm.cterm_of ctxt - |> Simps_To.SIMPS_TO_thm_resultsq simps_to_tac ctxt - fun unif_res (SIMPS_TO_thm, clhs_norm) = - let val tp = (Binders.replace_frees binders (Thm.term_of clhs_norm), rhs) - in - (@{log Logger.TRACE} ctxt (fn _ => Pretty.block [ - Pretty.str "Result: ", - Thm.pretty_thm ctxt SIMPS_TO_thm, - Pretty.str " Now unifying ", - Unification_Util.pretty_unif_problem ctxt tp - ] |> Pretty.string_of); - unif binders ctxt tp env - |> Seq.map (pair SIMPS_TO_thm)) - end - fun SIMPS_TO_UNIF_res (SIMPS_TO_thm, (env, eq_thm)) = - let val (SIMPS_TO_thm, eq_thm) = - (#norm_thm norms ctxt env SIMPS_TO_thm, #norm_unif_thm norms ctxt env eq_thm) - in (env, mk_SIMPS_TO_UNIF_thm SIMPS_TO_thm eq_thm) end +fun SIMPS_TO_UNIF_tac simp_tac eq_tac ctxt = + Tactic_Util.THEN' ( + match_tac ctxt [@{thm SIMPS_TO_UNIFI}] + THEN' Simps_To.SIMPS_TO_tac (Simps_To.simp_inst_tac simp_tac) ctxt, + eq_tac) + +fun SIMPS_TO_UNIF_env_thmsq simp_tac norms unif ctxt (tp as (lhs, rhs)) env = + (let + val goal = apply2 (Thm.cterm_of ctxt) tp |> mk_SIMPS_TO_UNIF_cprop |> Goal.init + fun eq_tac i state = + let + val tp = Thm.cprem_of state i |> Thm.dest_equals |> apply2 Thm.term_of 
+ fun norm_resolve (env, eq_thm) = #norm_unif_thm norms ctxt env eq_thm + |> Thm.implies_elim (#norm_thm norms ctxt env state) + |> pair env + in unif ctxt tp env |> Seq.map norm_resolve end in - (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + (@{log Logger.TRACE} ctxt (fn _ => Pretty.block [ Pretty.str "Creating ", Syntax.pretty_term ctxt @{term SIMPS_TO_UNIF}, Pretty.str " theorems for ", Unification_Util.pretty_terms ctxt [lhs, rhs] ] |> Pretty.string_of); - SIMPS_TO_res - |> Seq.maps unif_res - |> Seq.map SIMPS_TO_UNIF_res) - end + Tactic_Util.HEADGOAL (SIMPS_TO_UNIF_tac simp_tac eq_tac ctxt) goal + |> Seq.map (apsnd Goal.conclude)) + end) + handle TYPE _ => (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + Pretty.str "Types of terms ", + Unification_Util.pretty_terms ctxt [lhs, rhs], + Pretty.str " not equal" + ] |> Pretty.string_of); + Seq.empty) end diff --git a/thys/ML_Unification/Term_Index/discrimination_tree.ML b/thys/ML_Unification/Term_Index/discrimination_tree.ML --- a/thys/ML_Unification/Term_Index/discrimination_tree.ML +++ b/thys/ML_Unification/Term_Index/discrimination_tree.ML @@ -1,179 +1,181 @@ (* Title: Term_Index/discrimination_tree.ML Author: Lawrence C Paulson, Cambridge University Computer Laboratory, Sebastian Willenbrink, Kevin Kappelmann, TU Munich Discrimination trees based on Pure/net.ML. Requires operands to be beta-eta-short normalised. *) structure Discrimination_Tree : TERM_INDEX = struct structure TIB = Term_Index_Base val norm_term = Term_Normalisation.beta_eta_short datatype key = CombK | VarK | AtomK of string (*Keys are preorder lists of symbols -- Combinations, Vars, Atoms. Any term whose head is a Var is regarded entirely as a Var. Abstractions are also regarded as Vars; this covers eta-conversion and "near" eta-conversions such as %x. ?P (?f x).*) fun add_key_of_terms (t, ks) = let fun rands (t, ks) = case t of (*other cases impossible due to beta-eta-short norm*) t1 $ t2 => CombK :: rands (t1, add_key_of_terms (t2, ks)) | Const (n, _) => AtomK n :: ks | Free (n, _) => AtomK n :: ks | Bound i => AtomK (Name.bound i) :: ks in case head_of t of Var _ => VarK :: ks | Abs _ => VarK :: ks | _ => rands (t, ks) end (*convert a term to a list of keys*) fun key_of_term t = add_key_of_terms (t, []) (*Trees indexed by key lists: each arc is labelled by a key. Each node contains a list of values, and arcs to children. The empty key addresses the entire tree. Lookup functions preserve order of values stored at same level.*) datatype 'a term_index = Leaf of 'a list | Tree of {comb: 'a term_index, var: 'a term_index, atoms: 'a term_index Symtab.table} val empty = Leaf [] fun is_empty (Leaf []) = true | is_empty _ = false val empty_tree = Tree {comb=empty, var=empty, atoms=Symtab.empty} (* insert *) (*Adds value x to the list at the node addressed by the keys. Creates node if not already present. is_dup decides whether a value is a duplicate. 
The empty list of keys generates a Leaf node, others a Tree node.*) fun insert_keys is_dup x ksdtp = case ksdtp of ([], Leaf xs) => if exists is_dup xs then raise TIB.INSERT else Leaf (x :: xs) | (keys, Leaf []) => insert_keys is_dup x (keys, empty_tree) (*expand the tree*) | (CombK :: keys, Tree {comb, var, atoms}) => Tree {comb=insert_keys is_dup x (keys, comb), var=var, atoms=atoms} | (VarK :: keys, Tree {comb, var, atoms}) => Tree {comb=comb, var=insert_keys is_dup x (keys, var), atoms=atoms} | (AtomK a :: keys, Tree {comb, var, atoms}) => let val atoms' = Symtab.map_default (a, empty) (fn dt' => insert_keys is_dup x (keys, dt')) atoms in Tree {comb=comb, var=var, atoms=atoms'} end fun insert is_dup (t, x) dt = insert_keys is_dup x (key_of_term t, dt) fun insert_safe is_dup entry dt = insert is_dup entry dt handle TIB.INSERT => dt (* delete *) (*Create a new Tree node if it would be nonempty*) fun new_tree (args as {comb, var, atoms}) = if is_empty comb andalso is_empty var andalso Symtab.is_empty atoms then empty else Tree args (*Deletes values from the list at the node addressed by the keys. Raises DELETE if absent. Collapses the tree if possible. sel is the selector for values to be deleted.*) fun delete_keys sel ksdtp = case ksdtp of ([], Leaf xs) => if exists sel xs then Leaf (filter_out sel xs) else raise TIB.DELETE | (_, Leaf []) => raise TIB.DELETE | (CombK :: keys, Tree {comb, var, atoms}) => new_tree {comb=delete_keys sel (keys,comb), var=var, atoms=atoms} | (VarK :: keys, Tree {comb, var, atoms}) => new_tree {comb=comb, var=delete_keys sel (keys,var), atoms=atoms} | (AtomK a :: keys, Tree {comb, var, atoms}) => let val atoms' = (case Symtab.lookup atoms a of NONE => raise TIB.DELETE | SOME dt' => (case delete_keys sel (keys, dt') of Leaf [] => Symtab.delete a atoms | dt'' => Symtab.update (a, dt'') atoms)) in new_tree {comb=comb, var=var, atoms=atoms'} end fun delete eq t dt = delete_keys eq (key_of_term t, dt) fun delete_safe eq t dt = delete eq t dt handle TIB.DELETE => dt (* retrievals *) type 'a retrieval = 'a term_index -> term -> 'a list fun variants_keys dt keys = case (dt, keys) of (Leaf xs, []) => xs | (Leaf _, (_ :: _)) => [] (*non-empty keys and empty dt*) | (Tree {comb, ...}, (CombK :: keys)) => variants_keys comb keys | (Tree {var, ...}, (VarK :: keys)) => variants_keys var keys | (Tree {atoms, ...}, (AtomK a :: keys)) => case Symtab.lookup atoms a of SOME dt => variants_keys dt keys | NONE => [] fun variants dt t = variants_keys dt (key_of_term t) (*Skipping a term in a tree. Recursively skip 2 levels if a combination*) fun tree_skip (Leaf _) dts = dts | tree_skip (Tree {comb, var, atoms}) dts = var :: dts |> Symtab.fold (fn (_, dt) => fn dts => dt :: dts) atoms (* tree_skip comb [] only skips first term, so another skip is required *) |> fold_rev tree_skip (tree_skip comb []) (*conses the linked tree, if present, to dts*) fun look1 (atoms, a) dts = case Symtab.lookup atoms a of NONE => dts | SOME dt => dt :: dts datatype query = Instances | Generalisations | Unifiables (*Return the nodes accessible from the term (cons them before trees) Var in tree matches any term. 
Abs or Var in object: for unification, regarded as a wildcard; otherwise only matches a variable.*) fun query q t dt dts = let fun rands _ (Leaf _) dts = dts | rands t (Tree {comb, atoms, ...}) dts = case t of f $ t => fold_rev (query q t) (rands f comb []) dts | Const (n, _) => look1 (atoms, n) dts | Free (n, _) => look1 (atoms, n) dts | Bound i => look1 (atoms, Name.bound i) dts in case dt of Leaf _ => dts | Tree {var, ...} => case (head_of t, q) of (Var _, Generalisations) => var :: dts (*only matches Var in dt*) | (Var _, _) => tree_skip dt dts | (Abs _, Generalisations) => var :: dts (*only a Var can match*) (*A var instantiation in the abstraction could allow an eta-reduction, so regard the abstraction as a wildcard.*) | (Abs _, _) => tree_skip dt dts | (_, Instances) => rands t dt dts | (_, _) => rands t dt (var :: dts) (*var could also match*) end fun extract_leaves ls = maps (fn Leaf xs => xs) ls fun generalisations dt t = query Generalisations t dt [] |> extract_leaves fun unifiables dt t = query Unifiables t dt [] |> extract_leaves fun instances dt t = query Instances t dt [] |> extract_leaves (* merge *) fun cons_fst x (xs, y) = (x :: xs, y) fun dest (Leaf xs) = map (pair []) xs | dest (Tree {comb, var, atoms}) = flat [ map (cons_fst CombK) (dest comb), map (cons_fst VarK) (dest var), maps (fn (a, net) => map (cons_fst (AtomK a)) (dest net)) (Symtab.dest atoms) ] fun content dt = map #2 (dest dt) fun merge eq dt1 dt2 = if pointer_eq (dt1, dt2) then dt1 else if is_empty dt1 then dt2 - else fold (fn (ks, v) => fn dt => insert_keys (curry eq v) v (ks, dt)) (dest dt2) dt1 + else fold + (fn (ks, v) => fn dt => insert_keys (curry eq v) v (ks, dt) handle TIB.INSERT => dt) + (dest dt2) dt1 end diff --git a/thys/ML_Unification/Term_Index/term_index.ML b/thys/ML_Unification/Term_Index/term_index.ML --- a/thys/ML_Unification/Term_Index/term_index.ML +++ b/thys/ML_Unification/Term_Index/term_index.ML @@ -1,46 +1,47 @@ (* Title: Term_Index/term_index.ML Author: Kevin Kappelmann, Sebastian Willenbrink Signatures for term indexes. 
For a brief overview, see: https://en.wikipedia.org/wiki/Term_indexing *) signature TERM_INDEX_BASE = sig exception INSERT exception DELETE end structure Term_Index_Base : TERM_INDEX_BASE = struct exception INSERT exception DELETE end signature TERM_INDEX = sig type 'a term_index val empty : 'a term_index val is_empty : 'a term_index -> bool val content : 'a term_index -> 'a list (*puts a term into normal form as required by the index structure*) val norm_term : Term_Normalisation.term_normaliser (*first argument decides which values should be considered as duplicates; raises Term_Index_Base.INSERT if the term, value pair is already inserted*) val insert : ('a -> bool) -> (term * 'a) -> 'a term_index -> 'a term_index val insert_safe : ('a -> bool) -> (term * 'a) -> 'a term_index -> 'a term_index (*first argument selects which values for the given term should be removed; raises Term_Index_Base.DELETE if no value is deleted*) val delete : ('a -> bool) -> term -> 'a term_index -> 'a term_index val delete_safe : ('a -> bool) -> term -> 'a term_index -> 'a term_index type 'a retrieval = 'a term_index -> term -> 'a list val variants : 'a retrieval val generalisations : 'a retrieval val instances : 'a retrieval val unifiables : 'a retrieval - (*first argument is the equality test for values*) + (*first argument is the equality test for values; + prefers values from the first index in case of duplicates*) val merge : (('a * 'a) -> bool) -> 'a term_index -> 'a term_index -> 'a term_index -end \ No newline at end of file +end diff --git a/thys/ML_Unification/Term_Index/term_index_data.ML b/thys/ML_Unification/Term_Index/term_index_data.ML --- a/thys/ML_Unification/Term_Index/term_index_data.ML +++ b/thys/ML_Unification/Term_Index/term_index_data.ML @@ -1,32 +1,32 @@ (* Title: Term_Index/term_index_data.ML Author: Kevin Kappelmann Term index data stored in generic contexts. 
*) signature TERM_INDEX_DATA_ARGS = sig type data structure TI : TERM_INDEX val data_eq : data * data -> bool end functor Term_Index_Generic_Data_Args(P : TERM_INDEX_DATA_ARGS) : GENERIC_DATA_ARGS = struct type T = P.data P.TI.term_index val empty = P.TI.empty -fun merge (ti1, ti2)= P.TI.merge P.data_eq ti1 ti2 +fun merge (ti1, ti2) = P.TI.merge P.data_eq ti1 ti2 end signature TERM_INDEX_DATA = sig structure TI : TERM_INDEX structure TI_Data : GENERIC_DATA end functor Term_Index_Data(P : TERM_INDEX_DATA_ARGS) : TERM_INDEX_DATA = struct structure TI = P.TI structure TI_Data = Generic_Data(Term_Index_Generic_Data_Args(P)) end diff --git a/thys/ML_Unification/Tests/First_Order_ML_Unification_Tests.thy b/thys/ML_Unification/Tests/First_Order_ML_Unification_Tests.thy --- a/thys/ML_Unification/Tests/First_Order_ML_Unification_Tests.thy +++ b/thys/ML_Unification/Tests/First_Order_ML_Unification_Tests.thy @@ -1,238 +1,240 @@ \<^marker>\creator "Kevin Kappelmann"\ \<^marker>\contributor "Paul Bachmann"\ subsection \First-Order ML Unification Tests\ theory First_Order_ML_Unification_Tests imports ML_Unification_Tests_Base begin paragraph \Summary\ text \Tests for @{ML_structure "First_Order_Unification"}.\ ML\ structure Prop = SpecCheck_Property structure UC = Unification_Combinator open Unification_Tests_Base structure Unif = First_Order_Unification structure Norm = Envir_Normalisation val match = Unif.match [] val match_hints = let fun match binders = UC.add_fallback_matcher (Unif.e_match Unification_Util.match_types) ((fn binders => (Hints.map_retrieval (Hints.mk_retrieval Hints.TI.generalisations |> K) - #> Hints.UH.map_concl_unifier (Higher_Order_Pattern_Unification.match |> K) + #> Hints.UH.map_concl_unifier (Higher_Order_Pattern_Unification.match + |> Type_Unification.e_match Unification_Util.match_types |> K) #> Hints.UH.map_normalisers (Unification_Util.beta_eta_short_norms_match |> K) #> Hints.UH.map_prems_unifier (match |> UC.norm_matcher Norm.beta_norm_term_match |> K)) |> Context.proof_map #> Test_Unification_Hints.try_hints binders) |> UC.norm_matcher (#norm_term Unif.norms_match)) binders in match [] end val unify = Unif.unify [] val unify_hints = let fun unif binders = UC.add_fallback_unifier (Unif.e_unify Unification_Util.unify_types) ((fn binders => - (Hints.UH.map_concl_unifier (Higher_Order_Pattern_Unification.match |> K) + (Hints.UH.map_concl_unifier (Higher_Order_Pattern_Unification.match + |> Type_Unification.e_match Unification_Util.match_types |> K) #> Hints.UH.map_normalisers (Unification_Util.beta_eta_short_norms_unif |> K) #> Hints.UH.map_prems_unifier (unif |> UC.norm_unifier Norm.beta_norm_term_unif |> K)) |> Context.proof_map #> Test_Unification_Hints.try_hints binders) |> UC.norm_unifier (#norm_term Unif.norms_unify)) binders in unif [] end \ subsubsection \Matching\ paragraph \Unit Tests\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} |> Variable.declare_term @{term "f :: nat \ bool \ nat"} val tests = map (apply2 (Syntax.read_term ctxt))[ ("(?x :: nat \ ?'Z) 1", "f 1"), ("?x :: nat", "(?x + 1 :: nat)"), ("(g :: nat \ nat \ nat) ?x ?x", "(g :: nat \ nat \ nat) (?x + 1) (?x + 1)"), ("\y. (?x :: nat \ ?'Z) y", "\y. f y"), ("(g :: ?'X \ ?'Y \ ?'Z) (x :: ?'X) (y :: ?'Y)", "(g :: ?'Y \ ?'Z \ ?'Z) (x :: ?'Y) (y :: ?'Z)"), ("(g :: ?'Z \ ?'X)", "\(x :: ?'X). (g :: ?'X \ ?'Y) x"), ("\ (x :: nat). (0 :: nat)", "\ (x :: nat). (0 :: nat)"), ("\ (x :: nat). x", "\ (x :: nat). x"), ("\ (x :: nat) (y :: bool). 
(x, y)", "\ (x :: nat) (y :: bool). (x, y)"), ("\ (x :: ?'A) (y :: bool). (?x :: ?'A \ bool \ ?'Z) x y", "\ (x :: nat) (y :: bool). f x y"), ("\(x :: ?'X). (g :: ?'X \ ?'X) x", "(g :: ?'X \ ?'X)"), ("?g ?x ?y d", "g ?y ?x d"), ("f 0 True", "(\x y. f y x) True 0"), ("\ (x :: ?'a) y. f y", "\(x :: ?'b). f"), ("\y z. (?x :: nat \ bool \ nat) y z", "f"), ("\x. (?f :: nat \ bool \ nat) 0 x", "\x. f 0 x") ] val check = check_unit_tests_hints_match tests true [] in Lecker.test_group ctxt () [ check "match" match, check "match_hints" match_hints ] end \ subparagraph \Asymmetry\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} |> Variable.declare_term @{term "f :: nat \ bool \ nat"} val tests = map (apply2 (Syntax.read_term ctxt))[ ("f 1", "(?x :: nat \ ?'Z) 1") ] val check = check_unit_tests_hints_match tests false [] in Lecker.test_group ctxt () [ check "match" match, check "match_hints" match_hints ] end \ subparagraph \Ground Hints\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} val hints = map (Skip_Proof.make_thm (Proof_Context.theory_of ctxt) o Syntax.read_term ctxt) [ "?x \ 2 \ ?x + (-?x :: int) \ 0", "?x \ 0 \ ?y \ 0 \ (?x :: int) + ?y \ 0" ] val tests = map (apply2 (Syntax.read_term ctxt))[ ("(2 + -2) + (0 + 0) :: int", "0 :: int") ] val check_hints = check_unit_tests_hints_match tests in Lecker.test_group ctxt () [ check_hints false [] "match" match, check_hints false [] "match_hints without hints" match_hints, check_hints true hints "match_hints with hints" match_hints ] end \ subsubsection \Unification\ paragraph \Generated Tests\ ML_command\ structure Test_Params = struct val unify = unify val unify_hints = unify_hints val params = { nv = 4, ni = 2, max_h = 5, max_args = 4 } end structure First_Order_Tests = First_Order_Unification_Tests(Test_Params) val _ = First_Order_Tests.tests @{context} (SpecCheck_Random.new ()) \ paragraph \Unit Tests\ subparagraph \Occurs-Check\ ML_command\ let val unit_tests = [ ( Var (("x", 0), TVar (("X", 0), [])), Var (("y", 0), TVar (("X", 0), []) --> TFree ("'a", [])) $ Var (("x", 0), TVar (("X", 0), [])) ), ( Var (("y", 0), TVar (("X", 0), []) --> TFree ("'a", [])) $ Var (("x", 0), TVar (("X", 0), [])), Var (("x", 0), TVar (("X", 0), [])) ), ( Free (("f", [TVar (("X", 0), []), TVar (("X", 0), [])] ---> TFree ("'a", []))) $ Var (("x", 0), TVar (("X", 0), [])) $ Var (("y", 0), TVar (("X", 0), [])), Free (("f", [TVar (("X", 0), []), TVar (("X", 0), [])] ---> TFree ("'a", []))) $ Var (("y", 0), TVar (("X", 0), [])) $ ( Free (("g", TVar (("X", 0), []) --> TVar (("X", 0), []))) $ Var (("x", 0), TVar (("X", 0), [])) ) ), ( Free (("f", [TVar (("X", 0), []), TVar (("Y", 0), [])] ---> TFree ("'a", []))) $ Var (("x", 0), TVar (("X", 0), [])) $ Var (("y", 0), TVar (("Y", 0), [])), Free (("f", [TVar (("Y", 0), []), TVar (("X", 0), [])] ---> TFree ("'a", []))) $ Var (("y", 0), TVar (("Y", 0), [])) $ ( Free (("g", TVar (("X", 0), []) --> TVar (("X", 0), []))) $ Var (("x", 0), TVar (("X", 0), [])) ) ) ] fun check name unif ctxt _ = check_list unit_tests ("occurs-check for " ^ name) (Prop.prop (not o terms_unify_thms_correct_unif ctxt unif)) ctxt |> K () in Lecker.test_group @{context} () [ check "unify" unify, check "unify_hints" unify_hints ] end \ subparagraph \Lambda-Abstractions\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} |> Variable.declare_term @{term "f :: nat \ bool \ nat"} val tests = map (apply2 (Syntax.read_term ctxt))[ ("\ (x :: 
nat). (0 :: nat)", "\ (x :: nat). (0 :: nat)"), ("\ (x :: nat). x", "\ (x :: nat). x"), ("\ (x :: nat) (y :: bool). (x, y)", "\ (x :: nat) (y :: bool). (x, y)"), ("\ (x :: ?'a) y. f y", "\(x :: ?'b). f"), ("\ (x :: nat) (y :: bool). f x y", "\ (x :: nat) (y :: bool). (?x :: nat \ bool \ ?'Z) x y"), ("\x. (?f :: nat \ bool \ nat) 0 x", "\x. f ?g x"), ("(?f :: nat \ bool \ nat) (?n :: nat)", "?g :: bool \ nat") ] val check = check_unit_tests_hints_unif tests true [] in Lecker.test_group ctxt () [ check "unify" unify, check "unify_hints" unify_hints ] end \ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} val hints = map (Skip_Proof.make_thm (Proof_Context.theory_of ctxt) o Syntax.read_term ctxt) [ "?x \ (0 :: nat) \ ?y \ ?z \ ?x + ?y \ ?z" ] val tests = map (apply2 (Syntax.read_term ctxt))[ ("\ (x :: nat) (y :: bool). x", "\ (x :: nat) (y :: bool). 0 + x"), ("0 + (\ (x :: nat) (y :: bool). x) 0 ?y", "0 + (\ (x :: nat) (y :: bool). 0 + x) 0 ?z") ] val check_hints = check_unit_tests_hints_unif tests in Lecker.test_group ctxt () [ check_hints false [] "unify" unify, check_hints false [] "unify_hints without hints" unify_hints, check_hints true hints "unify_hints with hints" unify_hints ] end \ end diff --git a/thys/ML_Unification/Tests/Higher_Order_Pattern_ML_Unification_Tests.thy b/thys/ML_Unification/Tests/Higher_Order_Pattern_ML_Unification_Tests.thy --- a/thys/ML_Unification/Tests/Higher_Order_Pattern_ML_Unification_Tests.thy +++ b/thys/ML_Unification/Tests/Higher_Order_Pattern_ML_Unification_Tests.thy @@ -1,186 +1,192 @@ \<^marker>\creator "Kevin Kappelmann"\ \<^marker>\contributor "Paul Bachmann"\ subsection \Higher-Order Pattern ML Unification Tests\ theory Higher_Order_Pattern_ML_Unification_Tests imports ML_Unification_Tests_Base begin paragraph \Summary\ text \Tests for @{ML_structure "Higher_Order_Pattern_Unification"}.\ ML\ structure Prop = SpecCheck_Property structure UC = Unification_Combinator open Unification_Tests_Base structure Unif = Higher_Order_Pattern_Unification - val match = Unif.match [] + val norm_match_types = Type_Unification.e_match Unification_Util.match_types + val match = Unif.match |> norm_match_types + val closed_match = match [] val match_hints = let fun match binders = UC.add_fallback_matcher - (fn match_theory => Unif.e_match Unification_Util.match_types match_theory match_theory) + (fn match_theory => Unif.e_match Unification_Util.match_types match_theory match_theory + |> norm_match_types) ((fn binders => (Hints.map_retrieval (Hints.mk_retrieval Hints.TI.generalisations |> K) - #> Hints.UH.map_concl_unifier (Unif.match |> K) + #> Hints.UH.map_concl_unifier (match |> K) #> Hints.UH.map_normalisers (Unif.norms_match |> K) #> Hints.UH.map_prems_unifier (match |> K)) |> Context.proof_map #> Test_Unification_Hints.try_hints binders) |> UC.norm_matcher (#norm_term Unif.norms_match)) binders in match [] end - val unify = Unif.unify [] + val norm_unif_types = Type_Unification.e_unify Unification_Util.unify_types + val unify = Unif.unify |> norm_unif_types + val closed_unify = unify [] val unify_hints = let fun unif binders = UC.add_fallback_unifier - (fn unif_theory => Unif.e_unify Unification_Util.unify_types unif_theory unif_theory) + (fn unif_theory => Unif.e_unify Unification_Util.unify_types unif_theory unif_theory + |> norm_unif_types) ((fn binders => - (Hints.UH.map_concl_unifier (Unif.match |> K) + (Hints.UH.map_concl_unifier (match |> K) #> Hints.UH.map_normalisers (Unif.norms_unify |> K) #> 
Hints.UH.map_prems_unifier (unif |> K)) |> Context.proof_map #> Test_Unification_Hints.try_hints binders) |> UC.norm_unifier (#norm_term Unif.norms_unify)) binders in unif [] end \ subsubsection \Matching\ paragraph \Unit Tests\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} |> Variable.declare_term @{term "f :: nat \ bool \ nat"} val tests = map (apply2 (Syntax.read_term ctxt))[ ("?x :: nat", "(?x + 1 :: nat)"), ("(g :: nat \ nat \ nat) ?x ?x", "(g :: nat \ nat \ nat) (?x + 1) (?x + 1)"), ("\y. (?x :: nat \ ?'Z) y", "\y. f y"), ("(g :: ?'X \ ?'Y \ ?'Z) (x :: ?'X) (y :: ?'Y)", "(g :: ?'Y \ ?'Z \ ?'Z) (x :: ?'Y) (y :: ?'Z)"), ("(g :: ?'Z \ ?'X)", "\(x :: ?'X). (g :: ?'X \ ?'Y) x"), ("\ (x :: nat). (0 :: nat)", "\ (x :: nat). (0 :: nat)"), ("\ (x :: nat). x", "\ (x :: nat). x"), ("\ (x :: nat) (y :: bool). (x, y)", "\ (x :: nat) (y :: bool). (x, y)"), ("\ (x :: ?'A) (y :: bool). (?x :: ?'A \ bool \ ?'Z) x y", "\ (x :: nat) (y :: bool). f x y"), ("\(x :: ?'X). (g :: ?'X \ ?'X) x", "(g :: ?'X \ ?'X)"), ("\y. (?x :: nat \ bool \ nat) y", "\y. f y"), ("\y z. (?x :: nat \ bool \ nat) y z", "f") ] val check = check_unit_tests_hints_match tests true [] in Lecker.test_group ctxt () [ - check "match" match, + check "match" closed_match, check "match_hints" match_hints ] end \ subparagraph \Asymmetry\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} |> Variable.declare_term @{term "f :: nat \ bool \ nat"} val tests = map (apply2 (Syntax.read_term ctxt))[ ("\y. f y", "\y. (?x :: nat \ bool \ nat) y") ] val check = check_unit_tests_hints_match tests false [] in Lecker.test_group ctxt () [ - check "match" match, + check "match" closed_match, check "match_hints" match_hints ] end \ subparagraph \Ground Hints\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} val hints = map (Skip_Proof.make_thm (Proof_Context.theory_of ctxt) o Syntax.read_term ctxt) [ "?x \ 2 \ ?x + (-?x :: int) \ 0", "?x \ 0 \ ?y \ 0 \ (?x :: int) + ?y \ 0" ] val tests = map (apply2 (Syntax.read_term ctxt))[ ("(2 + -2) + (0 + 0) :: int", "0 :: int") ] val check_hints = check_unit_tests_hints_match tests in Lecker.test_group ctxt () [ - check_hints false [] "match" match, + check_hints false [] "match" closed_match, check_hints false [] "match_hints without hints" match_hints, check_hints true hints "match_hints with hints" match_hints ] end \ subsubsection \Unification\ paragraph \Generated Tests\ subparagraph \First-Order\ ML_command\ structure Test_Params = struct - val unify = unify + val unify = closed_unify val unify_hints = unify_hints val params = { nv = 10, ni = 10, max_h = 5, max_args = 4 } end structure First_Order_Tests = First_Order_Unification_Tests(Test_Params) val _ = First_Order_Tests.tests @{context} (SpecCheck_Random.new ()) \ subparagraph \Higher-Order Pattern\ ML_file\higher_order_pattern_unification_tests.ML\ ML_command\ val ctxt = @{context} val tests = Higher_Order_Pattern_Unification_Tests.unit_tests_unifiable ctxt val check_hints = check_unit_tests_hints_unif tests val _ = Lecker.test_group ctxt () [ - check_hints true [] "unify" unify, + check_hints true [] "unify" closed_unify, check_hints true [] "unify_hints" unify_hints ] \ paragraph \Unit Tests\ subparagraph \With Unification Hints\ ML_command\ let val ctxt = Proof_Context.set_mode Proof_Context.mode_schematic @{context} |> Variable.declare_term @{term "f :: (nat \ nat) \ nat \ nat"} |> Variable.declare_term @{term "g :: nat \ nat \ 
nat"} |> Variable.declare_term @{term "h :: nat"} val hints = map (Skip_Proof.make_thm (Proof_Context.theory_of ctxt) o Syntax.read_term ctxt) [ "?x \ (0 :: nat) \ ?y \ ?z \ ?x + ?y \ ?z", "(?x :: ?'X) \ (?y :: ?'X) \ id ?x \ ?y", "(?x1 :: nat) \ ?x2 \ (?y1 :: nat) \ ?y2 \ ?x1 + ?y1 \ ?y2 + ?x2" ] val tests = map (apply2 (Syntax.read_term ctxt)) [ ("\ x y. 0 + 1 :: nat", "\ x y. 1 :: nat"), ("\ x. 0 + 0 + x :: nat", "\ x. x :: nat"), ("\ x y. 0 + Suc ?x", "\ x y. Suc 2"), ("\ (x :: nat) (y :: nat). y + (0 + x)", "\ (x :: nat) (y :: nat). x + (0 + y)"), ("f (\ u. g (?x, h), h)", "id (f (\ u. ?y, 0 + h))") ] val check_hints = check_unit_tests_hints_unif tests in Lecker.test_group ctxt () [ - check_hints false [] "unify" unify, + check_hints false [] "unify" closed_unify, check_hints false [] "unify_hints without hints" unify_hints, check_hints true hints "unify_hints with hints" unify_hints ] end \ end diff --git a/thys/ML_Unification/Tests/ML_Unification_Tests_Base.thy b/thys/ML_Unification/Tests/ML_Unification_Tests_Base.thy --- a/thys/ML_Unification/Tests/ML_Unification_Tests_Base.thy +++ b/thys/ML_Unification/Tests/ML_Unification_Tests_Base.thy @@ -1,33 +1,35 @@ \<^marker>\creator "Kevin Kappelmann"\ subsection \Test Setup\ theory ML_Unification_Tests_Base imports ML_Unification_Hints SpecCheck.SpecCheck Main begin paragraph \Summary\ text \Shared setup for unification tests. We use \<^cite>\speccheck\ to generate tests and create unit tests.\ ML\ @{functor_instance struct_name = Test_Unification_Hints and functor_name = Term_Index_Unification_Hints and id = \"test"\ and more_args = \ structure TI = Discrimination_Tree val init_args = { - concl_unifier = SOME Higher_Order_Pattern_Unification.match, + concl_unifier = SOME (Higher_Order_Pattern_Unification.match + |> Type_Unification.e_match Unification_Util.match_types), normalisers = SOME Unification_Util.beta_eta_short_norms_unif, - prems_unifier = SOME Higher_Order_Pattern_Unification.unify, + prems_unifier = SOME (Higher_Order_Pattern_Unification.unify + |> Type_Unification.e_unify Unification_Util.unify_types), retrieval = SOME (Term_Index_Unification_Hints_Args.mk_sym_retrieval TI.norm_term TI.generalisations), hint_preprocessor = SOME (K I) }\} \ ML_file \tests_base.ML\ ML_file \first_order_unification_tests.ML\ end \ No newline at end of file diff --git a/thys/ML_Unification/Unification_Attributes/Unification_Attributes.thy b/thys/ML_Unification/Unification_Attributes/Unification_Attributes.thy --- a/thys/ML_Unification/Unification_Attributes/Unification_Attributes.thy +++ b/thys/ML_Unification/Unification_Attributes/Unification_Attributes.thy @@ -1,72 +1,70 @@ \<^marker>\creator "Kevin Kappelmann"\ -section \Unification Attributes\ theory Unification_Attributes - imports Unify_Resolve_Tactics + imports + Unification_Attributes_Base + ML_Unifiers begin paragraph \Summary\ -text \OF attribute with adjustable unifier.\ - -ML_file\unify_of_base.ML\ -ML_file\unify_of.ML\ +text \Setup of OF attribute with adjustable unifier.\ ML\ @{functor_instance struct_name = Standard_Unify_OF and functor_name = Unify_OF and id = \""\ and more_args = \val init_args = { normalisers = SOME Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify, unifier = SOME Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify, mode = SOME (Unify_OF_Args.PM.key Unify_OF_Args.PM.fact) }\} \ local_setup \Standard_Unify_OF.setup_attribute NONE\ paragraph \Examples\ experiment begin lemma assumes h1: "(PROP A \ PROP D) \ PROP E \ PROP C" 
assumes h2: "PROP B \ PROP D" and h3: "PROP F \ PROP E" shows "(PROP A \ PROP B) \ PROP F \ PROP C" by (fact h1[uOF h2 h3 where mode = resolve]) \\the line below is equivalent\ (* by (fact h1[OF h2 h3]) *) lemma assumes h1: "(PROP A \ PROP A)" assumes h2: "(PROP A \ PROP A) \ PROP B" shows "PROP B" by (fact h2[uOF h1]) \\the line below is equivalent\ (* by (fact h2[uOF h1 where mode = fact]) *) \\Note: @{attribute OF} would not work in this case:\ (* thm h2[OF h1] *) lemma assumes h1: "\x y z. PROP P x y \ PROP P y y \ (PROP A \ PROP A) \ (PROP A \ PROP B) \ PROP C" and h2: "\x y. PROP P x y" and h3 : "PROP A \ PROP A" and h4 : "PROP D \ PROP B" shows "(PROP A \ PROP D) \ PROP C" by (fact h1[uOF h2 h2 h3, uOF h4 where mode = resolve]) lemma assumes h1: "\P x. PROP P x \ PROP E P x" and h2: "PROP P x" shows "PROP E P x" by (fact h1[uOF h2]) \\the following line does not work (multiple unifiers error)\ (* by (fact h1[OF h2]) *) text\We can also specify the unifier to be used:\ lemma assumes h1: "\P. PROP P \ PROP E" and h2: "\P. PROP P" shows "PROP E" by (fact h1[uOF h2 where unifier = First_Order_Unification.unify]) \\the line below is equivalent\ (* using [[uOF unifier = First_Order_Unification.unify]] by (fact h1[uOF h2]) *) end end diff --git a/thys/ML_Unification/Unification_Attributes/Unification_Attributes_Base.thy b/thys/ML_Unification/Unification_Attributes/Unification_Attributes_Base.thy new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unification_Attributes/Unification_Attributes_Base.thy @@ -0,0 +1,13 @@ +\<^marker>\creator "Kevin Kappelmann"\ +section \Unification Attributes\ +theory Unification_Attributes_Base + imports Unify_Resolve_Tactics_Base +begin + +paragraph \Summary\ +text \OF attribute with adjustable unifier.\ + +ML_file\unify_of_base.ML\ +ML_file\unify_of.ML\ + +end diff --git a/thys/ML_Unification/Unification_Hints/ML_Unification_Hints.thy b/thys/ML_Unification/Unification_Hints/ML_Unification_Hints.thy --- a/thys/ML_Unification/Unification_Hints/ML_Unification_Hints.thy +++ b/thys/ML_Unification/Unification_Hints/ML_Unification_Hints.thy @@ -1,108 +1,98 @@ \<^marker>\creator "Kevin Kappelmann"\ section \Unification Hints\ theory ML_Unification_Hints imports - ML_Generic_Data_Utils - ML_Term_Index + ML_Unification_Hints_Base ML_Unifiers - ML_Unification_Parsers begin paragraph \Summary\ -text \A generalisation of unification hints, originally introduced in \<^cite>\"unif-hints"\. -We support a generalisation that -\<^enum> allows additional universal variables in premises -\<^enum> allows non-atomic left-hand sides for premises -\<^enum> allows arbitrary functions to perform the matching/unification of a hint with a disagreement pair. - -General shape of a hint: -\\y1...yn. (\x1...xn1. lhs1 \ rhs1) \ ... \ (\x1...xnk. lhsk \ rhsk) \ lhs \ rhs\ -\ - -ML_file\unification_hints_base.ML\ -ML_file\unification_hints.ML\ -ML_file\term_index_unification_hints.ML\ +text \Setup of unification hints.\ text \We now set up two unifiers using unification hints. The first one allows for recursive applications of unification hints when unifying a hint's conclusion \lhs \ rhs\ with a goal \lhs' \ rhs'\. -The second only uses a combination of higher-order pattern and first-order unification. -This particularly implies that recursive applications of unification hints have to be made explicit -in the hint itself (cf. @{dir "../Examples"}). +The second disallows recursive applications of unification hints. 
Recursive applications have to be +made explicit in the hint itself (cf. @{dir "../Examples"}). While the former can be convenient for local hint registrations and quick developments, it is advisable to use the second for global hints to avoid unexpected looping behaviour.\ ML\ @{functor_instance struct_name = Standard_Unification_Hints_Rec and functor_name = Term_Index_Unification_Hints and id = \"rec"\ and more_args = \ structure TI = Discrimination_Tree val init_args = { concl_unifier = SOME Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify, prems_unifier = SOME (Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify |> Unification_Combinator.norm_unifier Envir_Normalisation.beta_norm_term_unif), normalisers = SOME Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify, retrieval = SOME (Term_Index_Unification_Hints_Args.mk_sym_retrieval TI.norm_term TI.unifiables), hint_preprocessor = SOME (K I) }\} \ local_setup \Standard_Unification_Hints_Rec.setup_attribute NONE\ text\Standard unification hints using @{ML Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify} when looking for hints are accessible via @{attribute rec_uhint}. \<^emph>\Note:\ when we retrieve a potential unification hint with conclusion \lhs \ rhs\ for a goal \lhs' \ rhs'\, we only consider those hints whose lhs potentially higher-order unifies with lhs' or rhs' \<^emph>\without using hints\. For otherwise, any hint \lhs \ rhs\ applied to a goal \rhs \ lhs\ leads to an immediate loop.\ +declare [[ucombine add = \Standard_Unification_Combine.eunif_data + (Standard_Unification_Hints_Rec.try_hints + |> Unification_Combinator.norm_unifier + (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) + |> K) + (Standard_Unification_Combine.metadata Standard_Unification_Hints_Rec.binding Prio.LOW)\]] + ML\ @{functor_instance struct_name = Standard_Unification_Hints and functor_name = Term_Index_Unification_Hints and id = \""\ and more_args = \ structure TI = Discrimination_Tree val init_args = { - concl_unifier = SOME (fn binders => - Standard_Unification_Combine.map_eunif_datas (K Prio.Table.empty) - |> Context.proof_map - #> Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify binders), + concl_unifier = NONE, prems_unifier = SOME (Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify |> Unification_Combinator.norm_unifier Envir_Normalisation.beta_norm_term_unif), normalisers = SOME Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify, retrieval = SOME (Term_Index_Unification_Hints_Args.mk_sym_retrieval TI.norm_term TI.unifiables), hint_preprocessor = SOME (K I) }\} \ local_setup \Standard_Unification_Hints.setup_attribute NONE\ +declare [[uhint where concl_unifier = \fn binders => + Standard_Unification_Combine.delete_eunif_data + (Standard_Unification_Combine.metadata Standard_Unification_Hints.binding (Prio.LOW + 1)) + (*TODO: should we also remove the recursive hint unifier here? time will tell...*) + (*#> Standard_Unification_Combine.delete_eunif_data + (Standard_Unification_Combine.metadata Standard_Unification_Hints_Rec.binding Prio.LOW)*) + |> Context.proof_map + #> Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify binders\]] text\Standard unification hints using @{ML Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify} when looking for hints, without using fallback list of unifiers, are accessible via @{attribute uhint}. 
\<^emph>\Note:\ there will be no recursive usage of unification hints when searching for potential unification hints in this case. See also @{dir "../Examples"}.\ declare [[ucombine add = \Standard_Unification_Combine.eunif_data - (Standard_Unification_Hints_Rec.try_hints - |> Unification_Combinator.norm_unifier - (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) - |> K) - (Standard_Unification_Combine.default_metadata Standard_Unification_Hints_Rec.binding)\]] - -declare [[ucombine add = \Standard_Unification_Combine.eunif_data (Standard_Unification_Hints.try_hints |> Unification_Combinator.norm_unifier (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) |> K) - (Standard_Unification_Combine.default_metadata Standard_Unification_Hints.binding)\]] + (Standard_Unification_Combine.metadata Standard_Unification_Hints.binding (Prio.LOW + 1))\]] text\Examples see @{dir "../Examples"}.\ end diff --git a/thys/ML_Unification/Unification_Hints/ML_Unification_Hints_Base.thy b/thys/ML_Unification/Unification_Hints/ML_Unification_Hints_Base.thy new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unification_Hints/ML_Unification_Hints_Base.thy @@ -0,0 +1,30 @@ +\<^marker>\creator "Kevin Kappelmann"\ +section \Unification Hints\ +theory ML_Unification_Hints_Base + imports + ML_Conversion_Utils + ML_Functor_Instances + ML_Generic_Data_Utils + ML_Priorities + ML_Term_Index + ML_Term_Utils + ML_Unifiers_Base + ML_Unification_Parsers +begin + +paragraph \Summary\ +text \A generalisation of unification hints, originally introduced in \<^cite>\"unif-hints"\. +We support a generalisation that +\<^enum> allows additional universal variables in premises +\<^enum> allows non-atomic left-hand sides for premises +\<^enum> allows arbitrary functions to perform the matching/unification of a hint with a disagreement pair. + +General shape of a hint: +\\y1...yn. (\x1...xn1. lhs1 \ rhs1) \ ... \ (\x1...xnk. lhsk \ rhsk) \ lhs \ rhs\ +\ + +ML_file\unification_hints_base.ML\ +ML_file\unification_hints.ML\ +ML_file\term_index_unification_hints.ML\ + +end diff --git a/thys/ML_Unification/Unification_Hints/term_index_unification_hints.ML b/thys/ML_Unification/Unification_Hints/term_index_unification_hints.ML --- a/thys/ML_Unification/Unification_Hints/term_index_unification_hints.ML +++ b/thys/ML_Unification/Unification_Hints/term_index_unification_hints.ML @@ -1,384 +1,387 @@ (* Title: ML_Unification/term_index_unification_hints.ML Author: Kevin Kappelmann Unification hints stored in a term index. 
*) @{parse_entries (sig) PARSE_TERM_INDEX_UNIFICATION_HINTS_MODE [add, del]} @{parse_entries (sig) PARSE_TERM_INDEX_UNIFICATION_HINTS_ARGS [concl_unifier, normalisers, prems_unifier, retrieval, hint_preprocessor, prio]} @{parse_entries (sig) PARSE_TERM_INDEX_UNIFICATION_HINTS_CONTEXT_ARGS [concl_unifier, normalisers, prems_unifier, retrieval, hint_preprocessor]} signature TERM_INDEX_UNIFICATION_HINTS_ARGS = sig structure PA : PARSE_TERM_INDEX_UNIFICATION_HINTS_ARGS structure PCA : PARSE_TERM_INDEX_UNIFICATION_HINTS_CONTEXT_ARGS val PCA_entries_from_PA_entries : ('a, 'b, 'c, 'd, 'e, 'f) PA.entries -> ('a, 'b, 'c, 'd, 'e) PCA.entries val PA_entries_from_PCA_entries : ('a, 'b, 'c, 'd, 'e) PCA.entries -> 'f -> ('a, 'b, 'c, 'd, 'e, 'f) PA.entries val UHA_PA_entries_from_PCA_entries : ('a, 'b, 'c, 'd, 'e) PCA.entries -> ('a, 'b, 'c) Unification_Hints_Args.PA.entries structure PM : PARSE_TERM_INDEX_UNIFICATION_HINTS_MODE type mode = PM.key val parse_mode : mode parser type hint_prio = Unification_Hints_Base.unif_hint * Prio.prio val pretty_hint_prio : Proof.context -> hint_prio -> Pretty.T val sort_hint_prios : hint_prio list -> hint_prio list val mk_retrieval : Term_Normalisation.term_normaliser -> ('ti -> term -> hint_prio list) -> 'ti -> Unification_Hints_Base.hint_retrieval val mk_sym_retrieval : Term_Normalisation.term_normaliser -> ('ti -> term -> hint_prio list) -> 'ti -> Unification_Hints_Base.hint_retrieval type 'ti args = (Unification_Base.unifier, Unification_Base.normalisers, Unification_Base.unifier, 'ti -> Unification_Hints_Base.hint_retrieval, Unification_Hints_Base.hint_preprocessor, Prio.prio) PA.entries type 'ti context_args = (Unification_Base.unifier, Unification_Base.normalisers, Unification_Base.unifier, 'ti -> Unification_Hints_Base.hint_retrieval, Unification_Hints_Base.hint_preprocessor) PCA.entries val arg_parsers : (ML_Code_Util.code parser, ML_Code_Util.code parser, ML_Code_Util.code parser, ML_Code_Util.code parser, ML_Code_Util.code parser, Prio.prio context_parser) PA.entries end structure Term_Index_Unification_Hints_Args : TERM_INDEX_UNIFICATION_HINTS_ARGS = struct structure UB = Unification_Base structure EN = Envir_Normalisation structure UHB = Unification_Hints_Base structure UHA = Unification_Hints_Args structure TUHP = Prio @{parse_entries (struct) PA [concl_unifier, normalisers, prems_unifier, retrieval, hint_preprocessor, prio]} @{parse_entries (struct) PCA [concl_unifier, normalisers, prems_unifier, retrieval, hint_preprocessor]} fun PCA_entries_from_PA_entries {concl_unifier = concl_unifier, normalisers = normalisers, prems_unifier = prems_unifier, retrieval = retrieval, hint_preprocessor = hint_preprocessor,...} = {concl_unifier = concl_unifier, normalisers = normalisers, prems_unifier = prems_unifier, retrieval = retrieval, hint_preprocessor = hint_preprocessor} fun PA_entries_from_PCA_entries {concl_unifier = concl_unifier, normalisers = normalisers, prems_unifier = prems_unifier, retrieval = retrieval, hint_preprocessor = hint_preprocessor} prio = {concl_unifier = concl_unifier, normalisers = normalisers, prems_unifier = prems_unifier, retrieval = retrieval, hint_preprocessor = hint_preprocessor, prio = SOME prio} fun UHA_PA_entries_from_PCA_entries {concl_unifier = concl_unifier, normalisers = normalisers, prems_unifier = prems_unifier,...} = {concl_unifier = concl_unifier, normalisers = normalisers, prems_unifier = prems_unifier} @{parse_entries (struct) PM [add, del]} type mode = PM.key val parse_mode = PM.parse_key type hint_prio = UHB.unif_hint * 
TUHP.prio fun pretty_hint_prio ctxt (hint, prio) = Pretty.block [ UHB.pretty_hint ctxt hint, Pretty.enclose " (" ")" [Pretty.str "priority: ", Prio.pretty prio] ] (*FIXME: use Prio.Table instead of sorted lists*) val sort_hint_prios = sort (Prio.ord o apply2 snd) val hint_seq_of_hint_prios = sort_hint_prios #> Seq.of_list #> Seq.map fst fun mk_retrieval norm_term retrieve ti _ _ (t, _) _ = norm_term t |> retrieve ti |> hint_seq_of_hint_prios fun interleave [] ys = ys | interleave xs [] = xs | interleave (x :: xs) (y :: ys) = x :: y :: interleave xs ys fun mk_sym_retrieval norm_term retrieve ti _ _ (t1, t2) _ = interleave (norm_term t1 |> retrieve ti) (norm_term t2 |> retrieve ti |> map (apfst Unification_Hints_Base.symmetric_hint)) |> hint_seq_of_hint_prios type 'ti args = (UB.unifier, UB.normalisers, UB.unifier, 'ti -> UHB.hint_retrieval, UHB.hint_preprocessor, TUHP.prio) PA.entries type 'ti context_args = (UB.unifier, UB.normalisers, UB.unifier, 'ti -> UHB.hint_retrieval, UHB.hint_preprocessor) PCA.entries val arg_parsers = { concl_unifier = UHA.PA.get_concl_unifier_safe UHA.arg_parsers, normalisers = UHA.PA.get_normalisers_safe UHA.arg_parsers, prems_unifier = UHA.PA.get_prems_unifier_safe UHA.arg_parsers, retrieval = SOME (Parse_Util.nonempty_code (K "retrieval function must not be empty")), hint_preprocessor = SOME (Parse_Util.nonempty_code (K "hint preprocessor must not be empty")), prio = SOME Prio.parse } end signature TERM_INDEX_UNIFICATION_HINTS = sig include HAS_LOGGER structure UH : UNIFICATION_HINTS (*underlying term index*) structure TI : TERM_INDEX structure Data : GENERIC_DATA val get_retrieval : Context.generic -> Term_Index_Unification_Hints_Args.hint_prio TI.term_index -> Unification_Hints_Base.hint_retrieval val map_retrieval : ( (Term_Index_Unification_Hints_Args.hint_prio TI.term_index -> Unification_Hints_Base.hint_retrieval) -> Term_Index_Unification_Hints_Args.hint_prio TI.term_index -> Unification_Hints_Base.hint_retrieval) -> Context.generic -> Context.generic val get_hint_preprocessor : Context.generic -> Unification_Hints_Base.hint_preprocessor val map_hint_preprocessor : (Unification_Hints_Base.hint_preprocessor -> Unification_Hints_Base.hint_preprocessor) -> Context.generic -> Context.generic val get_index : Context.generic -> Term_Index_Unification_Hints_Args.hint_prio TI.term_index val map_index : (Term_Index_Unification_Hints_Args.hint_prio TI.term_index -> Term_Index_Unification_Hints_Args.hint_prio TI.term_index) -> Context.generic -> Context.generic val pretty_index : Proof.context -> Pretty.T val add_hint_prio : Term_Index_Unification_Hints_Args.hint_prio -> Context.generic -> Context.generic val del_hint : Unification_Hints_Base.unif_hint -> Context.generic -> Context.generic val mk_retrieval : Term_Index_Unification_Hints_Args.hint_prio TI.retrieval -> Term_Index_Unification_Hints_Args.hint_prio TI.term_index -> Unification_Hints_Base.hint_retrieval val mk_sym_retrieval : Term_Index_Unification_Hints_Args.hint_prio TI.retrieval -> Term_Index_Unification_Hints_Args.hint_prio TI.term_index -> Unification_Hints_Base.hint_retrieval val try_hints : Unification_Base.unifier val binding : binding val attribute : (Term_Index_Unification_Hints_Args.mode * ((ML_Code_Util.code, ML_Code_Util.code, ML_Code_Util.code, ML_Code_Util.code, ML_Code_Util.code, Prio.prio) Term_Index_Unification_Hints_Args.PA.entries)) * Position.T -> attribute val parse_attribute : attribute context_parser val setup_attribute : string option -> local_theory -> local_theory end functor 
Term_Index_Unification_Hints(A : sig structure FIA : FUNCTOR_INSTANCE_ARGS structure TI : TERM_INDEX val init_args : (Term_Index_Unification_Hints_Args.hint_prio TI.term_index) Term_Index_Unification_Hints_Args.context_args end) : TERM_INDEX_UNIFICATION_HINTS = struct structure UHB = Unification_Hints_Base structure TUHA = Term_Index_Unification_Hints_Args structure TUHP = Prio structure PA = TUHA.PA structure PCA = TUHA.PCA structure PM = TUHA.PM structure FIU = Functor_Instance_Util(A.FIA) structure TI = A.TI structure MCU = ML_Code_Util structure PU = Parse_Util val logger = Logger.setup_new_logger Unification_Hints_Base.logger FIU.FIA.full_name @{functor_instance struct_name = UH and functor_name = Unification_Hints and accessor = FIU.accessor and id = FIU.FIA.id and more_args = \val init_args = TUHA.UHA_PA_entries_from_PCA_entries A.init_args\} -fun are_hint_variants ctxt hp = +fun are_hint_variants hp = let - val tp = apply2 Thm.prop_of hp - val env = Unification_Util.empty_envir tp - fun match tp = First_Order_Unification.match [] ctxt tp env - in - match tp - |> Seq.maps (fn _ => match (swap tp)) - |> not o fst o General_Util.seq_is_empty - end + val ctp = apply2 Thm.cprop_of hp + val match = Thm.first_order_match + in can match ctp andalso can match (swap ctp) end (* fun are_hint_prio_variants ctxt ((h1, p1), (h2, p2)) = p1 = p2 andalso are_hint_variants ctxt (h1, h2) *) structure Data = Generic_Data(Pair_Generic_Data_Args( struct structure Data1 = Pair_Generic_Data_Args( struct structure Data1 = struct type T = TUHA.hint_prio TI.term_index -> Unification_Hints_Base.hint_retrieval val empty = PCA.get_retrieval A.init_args val merge = fst end structure Data2 = struct type T = UHB.hint_preprocessor val empty = PCA.get_hint_preprocessor A.init_args val merge = fst end end) structure Data2 = Term_Index_Generic_Data_Args( struct type data = TUHA.hint_prio structure TI = TI - fun data_eq ((h1, _), (h2, _))= are_hint_variants (Context.the_local_context ()) (h1, h2) + fun data_eq ((h1, _), (h2, _)) = are_hint_variants (h1, h2) end) end)) val get_retrieval = fst o fst o Data.get val map_retrieval = Data.map o apfst o apfst val get_hint_preprocessor = snd o fst o Data.get val map_hint_preprocessor = Data.map o apfst o apsnd val get_index = snd o Data.get val map_index = Data.map o apsnd fun pretty_index ctxt = get_index (Context.Proof ctxt) |> TI.content |> TUHA.sort_hint_prios |> map (TUHA.pretty_hint_prio ctxt) |> Pretty.fbreaks |> Pretty.block val term_index_key_from_hint = (*TODO: as of now, hints are only indexed on their lhs term; we could index on both terms to avoid more false positives*) UHB.cdest_hint_concl #> fst #> Thm.term_of #> TI.norm_term fun msg_illegal_hint_format ctxt hint = Pretty.block [ Pretty.str "Illegal hint format for ", UHB.pretty_hint ctxt hint ] |> Pretty.string_of fun add_hint_prio (hint, prio) context = let val ctxt = Context.proof_of context val hint = hint |> get_hint_preprocessor context ctxt - val is_hint_variant = curry (are_hint_variants ctxt) hint o fst + val is_hint_variant = curry are_hint_variants hint o fst in (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Adding hint ", UHB.pretty_hint ctxt hint ] |> Pretty.string_of); TI.insert is_hint_variant (term_index_key_from_hint hint, (hint, prio)) (get_index context) |> (fn ti => map_index (K ti) context)) handle Term_Index_Base.INSERT => (@{log Logger.WARN} ctxt (fn _ => Pretty.block [ Pretty.str "Hint ", UHB.pretty_hint ctxt hint, Pretty.str " already added." 
] |> Pretty.string_of); context) | TERM _ => (@{log Logger.WARN} ctxt (fn _ => msg_illegal_hint_format ctxt hint); context) end fun del_hint hint context = let val ctxt = Context.proof_of context val hint = hint |> get_hint_preprocessor context ctxt - val is_hint_variant = curry (are_hint_variants ctxt) hint o fst + val is_hint_variant = curry are_hint_variants hint o fst in (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Deleting hint ", UHB.pretty_hint ctxt hint ] |> Pretty.string_of); TI.delete is_hint_variant (term_index_key_from_hint hint) (get_index context) |> (fn ti => map_index (K ti) context)) handle Term_Index_Base.DELETE => (@{log Logger.WARN} ctxt (fn _ => Pretty.block [ Pretty.str "Hint ", UHB.pretty_hint ctxt hint, Pretty.str " not found." ] |> Pretty.string_of); context) | TERM _ => (@{log Logger.WARN} ctxt (fn _ => msg_illegal_hint_format ctxt hint); context) end val mk_retrieval = TUHA.mk_retrieval TI.norm_term val mk_sym_retrieval = TUHA.mk_sym_retrieval TI.norm_term -fun try_hints binders ctxt = - let val context = Context.Proof ctxt - in UH.try_hints (get_retrieval context (get_index context)) binders ctxt end +val binding = FIU.mk_binding_id_prefix "uhint" -val binding = FIU.mk_binding_id_prefix "uhint" +fun try_hints binders ctxt tp = + let + val context = Context.Proof ctxt + val _ = @{log Logger.DEBUG} ctxt (fn _ => + Pretty.block [ + Pretty.str "Trying unification hints ", + Binding.pretty binding, + Pretty.str " for ", + Unification_Util.pretty_unif_problem ctxt tp + ] |> Pretty.string_of) + in UH.try_hints (get_retrieval context (get_index context)) binders ctxt tp end val default_entries = PA.empty_entries () |> fold PA.set_entry [PA.prio TUHP.MEDIUM] fun parse_arg_entries mode = let val parsers = TUHA.arg_parsers val parse_value = PA.parse_entry (PA.get_concl_unifier parsers |> Scan.lift) (PA.get_normalisers parsers |> Scan.lift) (PA.get_prems_unifier parsers |> Scan.lift) (PA.get_retrieval parsers |> Scan.lift) (PA.get_hint_preprocessor parsers |> Scan.lift) (PA.get_prio parsers) val parse_entry = Parse_Key_Value.parse_entry' (Scan.lift PA.parse_key) (Scan.lift PU.eq) parse_value in PA.parse_entries_required' Parse.and_list1' [] parse_entry default_entries |> mode = PM.key PM.del ? PU.filter_cut' (is_none o PA.get_prio_safe) (K o K "Priority may not be specified for deletion.") end fun attribute ((mode, entries), pos) = let fun update_index thm = Term_Util.no_dummy_pattern (Thm.prop_of thm) ? 
( case mode of PM.add _ => add_hint_prio (thm, PA.get_prio entries) | PM.del _ => del_hint thm) val PCA_entries = TUHA.PCA_entries_from_PA_entries entries val UHA_PA_entries = TUHA.UHA_PA_entries_from_PCA_entries PCA_entries val run_code = ML_Attribute.run_map_context o rpair pos fun default_code (context, _) = (SOME context, NONE) val map_retrieval = case PCA.get_retrieval_safe PCA_entries of SOME c => FIU.code_struct_op "map_retrieval" @ MCU.atomic (MCU.read "K" @ MCU.atomic c) |> run_code | NONE => default_code val map_hint_preprocessor = case PCA.get_hint_preprocessor_safe PCA_entries of SOME c => FIU.code_struct_op "map_hint_preprocessor" @ MCU.atomic (MCU.read "K" @ MCU.atomic c) |> run_code | NONE => default_code in ML_Attribute_Util.apply_attribute (Thm.declaration_attribute update_index) #> ML_Attribute_Util.apply_attribute (UH.attribute (UHA_PA_entries, pos)) #> ML_Attribute_Util.apply_attribute map_retrieval #> map_hint_preprocessor end val parse_attribute = (Scan.lift (Scan.optional TUHA.parse_mode (PM.key PM.add)) :-- (fn mode => PU.optional' ((Scan.lift Parse.where_ |-- parse_arg_entries mode) |> Parse.!!!!) default_entries) |> PU.position') >> attribute val setup_attribute = Attrib.local_setup binding (Parse.!!!! parse_attribute) o the_default ("set unification hints arguments (" ^ FIU.FIA.full_name ^ ")") end diff --git a/thys/ML_Unification/Unification_Hints/unification_hints_base.ML b/thys/ML_Unification/Unification_Hints/unification_hints_base.ML --- a/thys/ML_Unification/Unification_Hints/unification_hints_base.ML +++ b/thys/ML_Unification/Unification_Hints/unification_hints_base.ML @@ -1,145 +1,145 @@ (* Title: ML_Unification/unification_hints_base.ML Author: Kevin Kappelmann, Paul Bachmann Unification hints (introduced in "Hints in unification" by Asperti et al, 2009). We support a generalisation that 1. allows additional universal variables in premises 2. allows non-atomic left-hand sides for premises 3. allows arbitrary functions to perform the matching/unification of a hint with a disagreement pair. General shape of a hint: \\y1...yn. (\x1...xn1. lhs1 \ rhs1) \ ... \ (\x1...xnk. 
lhsk \ rhsk) \ lhs \ rhs\ *) signature UNIFICATION_HINTS_BASE = sig include HAS_LOGGER type unif_hint = thm val cdest_hint : unif_hint -> (cterm * cterm) list * (cterm * cterm) val cdest_hint_concl : unif_hint -> cterm * cterm val symmetric_hint : unif_hint -> unif_hint val pretty_hint : Proof.context -> unif_hint -> Pretty.T type hint_preprocessor = Proof.context -> thm -> unif_hint val obj_logic_hint_preprocessor : thm -> conv -> hint_preprocessor val try_hint : Unification_Base.matcher -> Unification_Base.normalisers -> unif_hint -> Unification_Base.e_unifier type hint_retrieval = term Binders.binders -> Proof.context -> term * term -> Envir.env -> unif_hint Seq.seq val try_hints : hint_retrieval -> Unification_Base.matcher -> Unification_Base.normalisers -> Unification_Base.e_unifier end structure Unification_Hints_Base : UNIFICATION_HINTS_BASE = struct val logger = Logger.setup_new_logger Unification_Base.logger "Unification_Hints_Base" structure GUtil = General_Util structure TUtil = Term_Util structure CUtil = Conversion_Util type unif_hint = thm val cdest_hint = Thm.cprop_of #> TUtil.strip_cimp #>> map Thm.dest_equals ##> Thm.dest_equals val cdest_hint_concl = Thm.cconcl_of #> Thm.dest_equals (*only flips the conclusion since unification calls for the premises should be symmetric*) val symmetric_hint = Conv.concl_conv ~1 CUtil.symmetric_conv |> CUtil.thm_conv val pretty_hint = Thm.pretty_thm type hint_preprocessor = Proof.context -> thm -> unif_hint fun obj_logic_hint_preprocessor eq_eq_meta_eq default_conv ctxt = let fun eq_conv ct = (if can Thm.dest_equals ct then Conv.all_conv else Conv.rewr_conv eq_eq_meta_eq else_conv default_conv) ct val forall_eq_conv = Conversion_Util.repeat_forall_conv (K o K eq_conv) in Conversion_Util.imp_conv (forall_eq_conv ctxt) eq_conv |> Conversion_Util.thm_conv end (*Tries to apply a hint to solve E-unification of (t1 \? t2). Vars in hint are lifted wrt. the passed binders. Unifies the hint's conclusion with (t1, t2) using match. Unifies resulting unification problems using unify. Normalises the terms and theorems after unify with norms. Returns a sequence of (env, thm) pairs.*) fun try_hint match norms hint unify binders ctxt (t1, t2) (Envir.Envir {maxidx, tenv, tyenv}) = let val dest_all_equals = TUtil.strip_all ##> Logic.dest_equals val rev_binders = rev binders val _ = @{log Logger.TRACE} ctxt (fn _ => Pretty.block [ Pretty.str "Trying hint: ", pretty_hint ctxt hint ] |> Pretty.string_of) (*lift hint to include bound variables and increase indexes*) val lifted_hint = (*"P" is just some dummy proposition variable with appropriate index*) Logic.list_all (map fst rev_binders, Var (("P", maxidx + 1), propT)) |> Thm.cterm_of ctxt (*lift bound variables to the hint*) |> GUtil.flip Thm.lift_rule hint val (hint_prems, hint_concl) = Thm.prop_of lifted_hint |> TUtil.strip_subgoal |> snd (*match hint with unification pair*) val (P, Q) = dest_all_equals hint_concl |> snd val match = match binders ctxt val (no_hint_match, match_env_concl_thmpq) = Envir.Envir {maxidx = Thm.maxidx_of lifted_hint, tenv = tenv, tyenv = tyenv} |> match (P, t1) |> Seq.maps (fn (env, thm1) => match (Q, t2) env |> Seq.map (apsnd (pair thm1))) |> GUtil.seq_is_empty in if no_hint_match then (@{log Logger.TRACE} ctxt (K "Hint does not match."); Seq.empty) else let val _ = @{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Hint ", pretty_hint ctxt hint, Pretty.str ((length hint_prems > 0 ? 
suffix " Unifying premises...") " matched.") ] |> Pretty.string_of) (*unify each hint premise and collect the theorems while iteratively extending the environment*) fun unif_prem prem = let val (params, PQ_prem) = dest_all_equals prem val (binders, ctxt) = Binders.fix_binders params ctxt fun unif_add_result (env, thms) = unify binders ctxt PQ_prem env |> Seq.map (apsnd (fn thm => (binders, thm) :: thms)) in Seq.maps unif_add_result end fun unify_prems (match_env, concl_thmp) = fold unif_prem hint_prems (Seq.single (match_env, [])) |> Seq.map (rpair concl_thmp) fun inst_discharge ((env_res, prem_thms), concl_thmp) = let (*instantiate the theorems*) val lifted_hint_inst = #norm_thm norms ctxt env_res lifted_hint val norm_term = #norm_term norms env_res val mk_norm_cbinders = map (Thm.cterm_of ctxt o norm_term o snd) val norm_unif_thm = #norm_unif_thm norms ctxt env_res fun forall_intr binders = fold Thm.forall_intr (mk_norm_cbinders binders) o norm_unif_thm val prem_thms_inst = map (uncurry forall_intr) prem_thms |> rev val (concl_thmL, concl_thmR) = apply2 norm_unif_thm concl_thmp (*discharge the hint premises*) val thm_res = Drule.implies_elim_list lifted_hint_inst prem_thms_inst |> Drule.forall_elim_list (mk_norm_cbinders rev_binders) |> GUtil.flip Thm.transitive concl_thmR |> Thm.transitive (Unification_Base.symmetric concl_thmL) in (env_res, thm_res) end in Seq.maps unify_prems match_env_concl_thmpq |> Seq.map inst_discharge end end type hint_retrieval = term Binders.binders -> Proof.context -> term * term -> Envir.env -> unif_hint Seq.seq fun try_hints get_hints match norms unif binders ctxt (t1, t2) env = - (@{log Logger.DEBUG} ctxt (fn _ => + (@{log Logger.TRACE} ctxt (fn _ => Pretty.block [ Pretty.str "Trying unification hints for ", Unification_Util.pretty_unif_problem ctxt (t1, t2) ] |> Pretty.string_of); get_hints binders ctxt (t1, t2) env |> Seq.maps (fn hint => try_hint match norms hint unif binders ctxt (t1, t2) env)) end \ No newline at end of file diff --git a/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic.thy b/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic.thy --- a/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic.thy +++ b/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic.thy @@ -1,70 +1,65 @@ \<^marker>\creator "Kevin Kappelmann"\ -subsection \Assumption Tactic\ theory Unify_Assumption_Tactic imports - ML_Functor_Instances + Unify_Assumption_Tactic_Base ML_Unifiers - ML_Unification_Parsers begin paragraph \Summary\ -text \Assumption tactic and method with adjustable unifier.\ - -ML_file\unify_assumption_base.ML\ -ML_file\unify_assumption.ML\ +text \Setup of assumption tactic and examples.\ ML\ @{functor_instance struct_name = Standard_Unify_Assumption and functor_name = Unify_Assumption and id = \""\ and more_args = \val init_args = { normalisers = SOME Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify, unifier = SOME Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify }\} \ local_setup \Standard_Unify_Assumption.setup_attribute NONE\ local_setup \Standard_Unify_Assumption.setup_method NONE\ paragraph \Examples\ experiment begin lemma "PROP P \ PROP P" by uassm lemma assumes h: "\P. PROP P" shows "PROP P x" using h by uassm schematic_goal "\x. 
PROP P (c :: 'a) \ PROP ?Y (x :: 'a)" by uassm schematic_goal a: "PROP ?P (y :: 'a) \ PROP ?P (?x :: 'a)" by uassm \\compare the result with following call\ (* by assumption *) schematic_goal "PROP ?P (x :: 'a) \ PROP P (?x :: 'a)" by uassm \\compare the result with following call\ (* by assumption *) schematic_goal "\x. PROP D \ (\P y. PROP P y x) \ PROP C \ PROP P x" by (uassm unifier = Higher_Order_Unification.unify) \\the line below is equivalent\ (* using [[uassm unifier = Higher_Order_Unification.unify]] by uassm *) text \Unlike @{method assumption}, @{method uassm} will not close the goal if the order of premises of the assumption and the goal are different. Compare the following two examples:\ lemma "\x. PROP D \ (\y. PROP A y \ PROP B x) \ PROP C \ PROP A x \ PROP B x" by uassm lemma "\x. PROP D \ (\y. PROP A y \ PROP B x) \ PROP A x \ PROP C \ PROP B x" by assumption (* by uassm *) end end diff --git a/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic_Base.thy b/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic_Base.thy new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unification_Tactics/Assumption/Unify_Assumption_Tactic_Base.thy @@ -0,0 +1,16 @@ +\<^marker>\creator "Kevin Kappelmann"\ +subsection \Assumption Tactic\ +theory Unify_Assumption_Tactic_Base + imports + ML_Functor_Instances + ML_Tactic_Utils + ML_Unification_Parsers +begin + +paragraph \Summary\ +text \Assumption tactic and method with adjustable unifier.\ + +ML_file\unify_assumption_base.ML\ +ML_file\unify_assumption.ML\ + +end diff --git a/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic.thy b/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic.thy --- a/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic.thy +++ b/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic.thy @@ -1,47 +1,45 @@ \<^marker>\creator "Kevin Kappelmann"\ subsection \Fact Tactic\ theory Unify_Fact_Tactic imports - Unify_Resolve_Tactics + Unify_Fact_Tactic_Base + ML_Unifiers begin paragraph \Summary\ -text \Fact tactic with adjustable unifier.\ - -ML_file\unify_fact_base.ML\ -ML_file\unify_fact.ML\ +text \Setup of fact tactic and examples.\ ML\ @{functor_instance struct_name = Standard_Unify_Fact and functor_name = Unify_Fact and id = \""\ and more_args = \val init_args = { normalisers = SOME Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify, unifier = SOME Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify }\} \ local_setup \Standard_Unify_Fact.setup_attribute NONE\ local_setup \Standard_Unify_Fact.setup_method NONE\ paragraph \Examples\ experiment begin lemma assumes h: "\x y. PROP P x y" shows "PROP P x y" by (ufact h) lemma assumes "\P y. PROP P y x" shows "PROP P x" by (ufact assms where unifier = Higher_Order_Unification.unify) \\the line below is equivalent\ (* using [[ufact unifier = Higher_Order_Unification.unify]] by (ufact assms) *) lemma assumes "\x y. PROP A x \ PROP B x \ PROP P x" shows "\x y. 
PROP A x \ PROP B x \ PROP P x" using assms by ufact end end diff --git a/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic_Base.thy b/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic_Base.thy new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unification_Tactics/Fact/Unify_Fact_Tactic_Base.thy @@ -0,0 +1,14 @@ +\<^marker>\creator "Kevin Kappelmann"\ +subsection \Fact Tactic\ +theory Unify_Fact_Tactic_Base + imports + Unify_Resolve_Tactics_Base +begin + +paragraph \Summary\ +text \Fact tactic with adjustable unifier.\ + +ML_file\unify_fact_base.ML\ +ML_file\unify_fact.ML\ + +end diff --git a/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics.thy b/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics.thy --- a/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics.thy +++ b/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics.thy @@ -1,91 +1,89 @@ \<^marker>\creator "Kevin Kappelmann"\ subsection \Resolution Tactics\ theory Unify_Resolve_Tactics imports - Unify_Assumption_Tactic - ML_Method_Utils + Unify_Resolve_Tactics_Base + ML_Unifiers begin paragraph \Summary\ -text \Resolution tactics and methods with adjustable unifier.\ +text \Setup of resolution tactics and examples.\ -ML_file\unify_resolve_base.ML\ -ML_file\unify_resolve.ML\ ML\ @{functor_instance struct_name = Standard_Unify_Resolve and functor_name = Unify_Resolve and id = \""\ and more_args = \val init_args = { normalisers = SOME Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify, unifier = SOME Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify, mode = SOME (Unify_Resolve_Args.PM.key Unify_Resolve_Args.PM.any), chained = SOME (Unify_Resolve_Args.PCM.key Unify_Resolve_Args.PCM.resolve) }\} \ local_setup \Standard_Unify_Resolve.setup_attribute NONE\ local_setup \Standard_Unify_Resolve.setup_method NONE\ paragraph \Examples\ experiment begin lemma assumes h: "\x. PROP D x \ PROP C x" shows "\x. PROP A x \ PROP B x \ PROP C x" apply (urule h) \\the line below is equivalent\ (* apply (rule h) *) oops lemma assumes h: "PROP C x" shows "PROP C x" by (urule h where unifier = First_Order_Unification.unify) \\the line below is equivalent\ (* using [[urule unifier = First_Order_Unification.unify]] by (urule h) *) lemma assumes h: "\x. PROP A x \ PROP D x" shows "\x. PROP A x \ PROP B x \ PROP C x" \\use (r,e,d,f) to specify the resolution mode (resolution, elim, dest, forward)\ apply (urule (d) h) \\the line below is equivalent\ (* apply (drule h) *) oops text\You can specify how chained facts should be used. By default, @{method urule} works like @{method rule}: it uses chained facts to resolve against the premises of the passed rules.\ lemma assumes h1: "\x. (PROP F x \ PROP E x) \ PROP C x" and h2: "\x. PROP F x \ PROP E x" shows "\x. PROP A x \ PROP B x \ PROP C x" \\Compare all of the following calls:\ (* apply (rule h1) *) (* apply (urule h1) *) (* using h2 apply (rule h1) *) (* using h2 apply (urule h1) *) using h2 apply (urule h1 where chained = fact) (* using h2 apply (urule h1 where chained = insert) *) done text\You can specify whether any or every rule must resolve against the goal:\ lemma assumes h1: "\x y. PROP C y \ PROP D x \ PROP C x" and h2: "\x y. PROP C x \ PROP D x" and h3: "\x y. PROP C x" shows "\x. PROP A x \ PROP B x \ PROP C x" using h3 apply (urule h1 h2 where mode = every) (* using h3 apply (urule h1 h2) *) done lemma assumes h1: "\x y. 
PROP C y \ PROP A x \ PROP C x" and h2: "\x y. PROP C x \ PROP B x \ PROP D x" and h3: "\x y. PROP C x" shows "\x. PROP A x \ PROP B x \ PROP C x" using h3 apply (urule (d) h1 h2 where mode = every) oops end end diff --git a/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics_Base.thy b/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics_Base.thy new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unification_Tactics/Resolution/Unify_Resolve_Tactics_Base.thy @@ -0,0 +1,16 @@ +\<^marker>\creator "Kevin Kappelmann"\ +subsection \Resolution Tactics\ +theory Unify_Resolve_Tactics_Base + imports + Unify_Assumption_Tactic_Base + ML_Unifiers_Base + ML_Method_Utils +begin + +paragraph \Summary\ +text \Resolution tactics and methods with adjustable unifier.\ + +ML_file\unify_resolve_base.ML\ +ML_file\unify_resolve.ML\ + +end diff --git a/thys/ML_Unification/Unifiers/ML_Unifiers.thy b/thys/ML_Unification/Unifiers/ML_Unifiers.thy --- a/thys/ML_Unification/Unifiers/ML_Unifiers.thy +++ b/thys/ML_Unification/Unifiers/ML_Unifiers.thy @@ -1,73 +1,69 @@ \<^marker>\creator "Kevin Kappelmann"\ -section \ML Unifiers\ theory ML_Unifiers imports - ML_Unification_Base ML_Functor_Instances ML_Priorities + ML_Unifiers_Base Simps_To begin paragraph \Summary\ -text \Unification modulo equations and combinators for unifiers.\ +text \More unifiers.\ -paragraph \Combinators\ +paragraph \Derived Unifiers\ -ML_file\unification_combinator.ML\ +ML_file\higher_order_pattern_decomp_unification.ML\ +ML_file\var_higher_order_pattern_unification.ML\ + +paragraph \Unification via Tactics\ + +ML_file\tactic_unification.ML\ + +subparagraph \Unification via Simplification\ + +lemma eq_if_SIMPS_TO_UNIF_if_SIMPS_TO: + assumes "PROP SIMPS_TO t t'" + and "PROP SIMPS_TO_UNIF s t'" + shows "s \ t" + using assms by (simp add: SIMPS_TO_eq SIMPS_TO_UNIF_eq) + +ML_file\simplifier_unification.ML\ + +paragraph \Combining Unifiers\ ML_file\unification_combine.ML\ ML\ @{functor_instance struct_name = Standard_Unification_Combine and functor_name = Unification_Combine and id = \""\} \ local_setup \Standard_Unification_Combine.setup_attribute NONE\ - -paragraph \Standard Unifiers\ - -ML_file\higher_order_unification.ML\ -ML_file\higher_order_pattern_unification.ML\ -ML_file\first_order_unification.ML\ - -subparagraph \Derived Unifiers\ - -ML_file\higher_order_pattern_decomp_unification.ML\ -ML_file\var_higher_order_pattern_unification.ML\ - - -paragraph \Unification via Tactics\ - -ML_file\tactic_unification.ML\ - -subparagraph \Unification via Simplification\ - -ML_file\simplifier_unification.ML\ - paragraph \Mixture of Unifiers\ ML_file\mixed_unification.ML\ ML\ @{functor_instance struct_name = Standard_Mixed_Unification and functor_name = Mixed_Unification and id = \""\ and more_args = \structure UC = Standard_Unification_Combine\} \ declare [[ucombine add = \Standard_Unification_Combine.eunif_data (Var_Higher_Order_Pattern_Unification.e_unify Unification_Combinator.fail_unify |> Unification_Combinator.norm_unifier (#norm_term Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) |> K) (Standard_Unification_Combine.metadata \<^binding>\var_hop_unif\ Prio.HIGH)\]] declare [[ucombine add = \Standard_Unification_Combine.eunif_data - (Simplifier_Unification.simp_unify - |> Unification_Combinator.norm_closed_unifier + (Simplifier_Unification.simp_unify_progress Envir.aeconv Simplifier_Unification.simp_unify (#norm_term 
Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify) - |> Unification_Combinator.unifier_from_closed_unifier + Standard_Mixed_Unification.norms_first_higherp_decomp_comb_higher_unify + Standard_Mixed_Unification.first_higherp_decomp_comb_higher_unify + |> Type_Unification.e_unify Unification_Util.unify_types |> K) - (Standard_Unification_Combine.default_metadata \<^binding>\simp_unif\)\]] - + (Standard_Unification_Combine.default_metadata \<^binding>\simp_unif\) + \]] end diff --git a/thys/ML_Unification/Unifiers/ML_Unifiers_Base.thy b/thys/ML_Unification/Unifiers/ML_Unifiers_Base.thy new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unifiers/ML_Unifiers_Base.thy @@ -0,0 +1,25 @@ +\<^marker>\creator "Kevin Kappelmann"\ +section \ML Unifiers\ +theory ML_Unifiers_Base + imports + ML_Unification_Base +begin + +paragraph \Summary\ +text \Unification modulo equations and combinators for unifiers.\ + +paragraph \Combinators\ + +ML_file\unification_combinator.ML\ + +paragraph \Type Unifiers\ + +ML_file\type_unification.ML\ + +paragraph \Standard Unifiers\ + +ML_file\higher_order_unification.ML\ +ML_file\higher_order_pattern_unification.ML\ +ML_file\first_order_unification.ML\ + +end diff --git a/thys/ML_Unification/Unifiers/higher_order_pattern_decomp_unification.ML b/thys/ML_Unification/Unifiers/higher_order_pattern_decomp_unification.ML --- a/thys/ML_Unification/Unifiers/higher_order_pattern_decomp_unification.ML +++ b/thys/ML_Unification/Unifiers/higher_order_pattern_decomp_unification.ML @@ -1,141 +1,140 @@ (* Title: ML_Unification/higher_order_pattern_decomp_unification.ML Author: Kevin Kappelmann Tries to decompose terms that are outside the higher-order pattern fragment into a higher-order pattern prefix and a list of remaining arguments. Example: Let 0, 1 be bound variables and f, g, c be constants. The unification problem ?x 0 1 (?y c 0) \\<^sup>? f (g c 0) is outside the HO-pattern fragment, but it can be decomposed into ?x 0 1 \\<^sup>? f and (?y c 0) \\<^sup>? (g c 0). The latter, in turn, can be decomposed into ?y \\<^sup>? g and c \\<^sup>? c and 0 \\<^sup>? 0. 
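A typical way to use this decomposition (a sketch, mirroring the combination used in mixed_unification.ML; "unif" and "fallback_unif" are placeholder names for arbitrary unifiers): to obtain a unifier that first tries the decomposition, uses "unif" for the emerging sub-problems, and otherwise calls "fallback_unif" on the original problem, combine it as
  Unification_Combinator.add_fallback_unifier (e_unify unif) fallback_unif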
*) signature HIGHER_ORDER_PATTERN_DECOMP_UNIFICATION = sig include HAS_LOGGER val e_match : Unification_Base.matcher -> Unification_Base.e_matcher val e_unify : Unification_Base.unifier -> Unification_Base.e_unifier end structure Higher_Order_Pattern_Decomp_Unification : HIGHER_ORDER_PATTERN_DECOMP_UNIFICATION = struct val logger = Logger.setup_new_logger Unification_Base.logger "Higher_Order_Pattern_Decomp_Unification" structure UUtil = Unification_Util -structure HOP = Higher_Order_Pattern_Unification structure UC = Unification_Combinator exception FALLBACK of Pretty.T (*check if sequence is empty or raise FALLBACK exception*) fun seq_try msg sq = General_Util.seq_try (FALLBACK msg) sq (*compute pattern prefixes*) fun bounds_prefix bounds [] = (rev bounds, []) | bounds_prefix bounds (t :: ts) = if is_Bound t andalso not (member (op =) bounds t) then bounds_prefix (t :: bounds) ts else (rev bounds, t :: ts) val bounds_prefix = bounds_prefix [] datatype ('a, 'b) either = Left of 'a | Right of 'b fun bounds_prefix2 (bounds1, _) ([], _) = Left (rev bounds1, []) | bounds_prefix2 (_, bounds2) (_, []) = Right (rev bounds2, []) | bounds_prefix2 (bounds1, bounds2) (s :: ss, t :: ts) = if is_Bound s andalso not (member (op =) bounds1 s) then if is_Bound t andalso not (member (op =) bounds2 t) then bounds_prefix2 (s :: bounds1, t :: bounds2) (ss, ts) else Right (rev bounds2, t :: ts) else Left (rev bounds1, s :: ss) val bounds_prefix2 = bounds_prefix2 ([], []) fun mk_decomp _ (_, []) = NONE (*no decomposition took place*) | mk_decomp h (args, rem) = SOME (list_comb (h, args), rem) fun mk_bounds_decomp h args = bounds_prefix args |> mk_decomp h fun pattern_prefix_match p = case strip_comb p of (v as Var _, args) => mk_bounds_decomp v args | _ => NONE fun pattern_prefix_unif tp = case apply2 strip_comb tp of ((vl as Var _, argsl), (vr as Var _, argsr)) => (case bounds_prefix2 (argsl, argsr) of Left res => mk_decomp vl res |> Option.map Left | Right res => mk_decomp vr res |> Option.map Right) | ((v as Var _, args), _) => mk_bounds_decomp v args |> Option.map Left | (_, (v as Var _, args)) => mk_bounds_decomp v args |> Option.map Right | _ => NONE fun mk_decomp_other ss t = let val (th, ts) = strip_comb t val n = length ts - length ss in if n < 0 then NONE else chop n ts |> mk_decomp th end fun decompose_msg ctxt tp = Pretty.block [ Pretty.str "Decomposing ", UUtil.pretty_unif_problem ctxt tp, Pretty.str " into higher-order pattern prefix and list of remaining arguments." ] |> Pretty.string_of fun decomposed_msg ctxt (ph, th) (ps, ts) = Pretty.block [ Pretty.str "Decomposition result ", map (UUtil.pretty_unif_problem ctxt) ((ph, th) :: (ps ~~ ts)) |> Pretty.separate "," |> Pretty.block ] |> Pretty.string_of val failed_decompose_msg = Pretty.str "Decomposition failed." fun e_match match_pattern match_theory binders ctxt (pt as (p, t)) env = let val failed_sub_patterns_msg = Pretty.str "Matching of sub-patterns failed." 
in (@{log Logger.DEBUG} ctxt (fn _ => decompose_msg ctxt pt); case pattern_prefix_match p of SOME (ph, ps) => (case mk_decomp_other ps t of SOME (th, ts) => (@{log Logger.TRACE} ctxt (fn _ => decomposed_msg ctxt (ph, th) (ps, ts)); UUtil.strip_comb_strip_comb (K o K I) match_pattern binders ctxt (ph, th) (ps, ts) env |> seq_try failed_sub_patterns_msg) | NONE => raise FALLBACK failed_decompose_msg) | NONE => raise FALLBACK failed_decompose_msg) handle FALLBACK msg => (@{log Logger.DEBUG} ctxt (fn _ => Pretty.breaks [ msg, UUtil.pretty_call_theory_match ctxt pt ] |> Pretty.block |> Pretty.string_of); match_theory binders ctxt pt env) end fun e_unify unif_pattern unif_theory binders ctxt (tp as (s, t)) env = let val unif_patterns = UUtil.strip_comb_strip_comb Envir_Normalisation.norm_thm_types_unif unif_pattern val failed_sub_patterns_msg = Pretty.str "Unification of sub-patterns failed." in (@{log Logger.DEBUG} ctxt (fn _ => decompose_msg ctxt tp); case pattern_prefix_unif tp of SOME (Left (sh, ss)) => (case mk_decomp_other ss t of SOME (th, ts) => (@{log Logger.TRACE} ctxt (fn _ => decomposed_msg ctxt (sh, th) (ss, ts)); unif_patterns binders ctxt (sh, th) (ss, ts) env |> seq_try failed_sub_patterns_msg) | NONE => raise FALLBACK failed_decompose_msg) | SOME (Right (th, ts)) => (case mk_decomp_other ts s of SOME (sh, ss) => (@{log Logger.TRACE} ctxt (fn _ => decomposed_msg ctxt (sh, th) (ss, ts)); unif_patterns binders ctxt (sh, th) (ss, ts) env |> seq_try failed_sub_patterns_msg) | NONE => raise FALLBACK failed_decompose_msg) | NONE => raise FALLBACK failed_decompose_msg) handle FALLBACK msg => (@{log Logger.DEBUG} ctxt (fn _ => Pretty.breaks [ msg, UUtil.pretty_call_theory_unif ctxt tp ] |> Pretty.block |> Pretty.string_of); unif_theory binders ctxt tp env) end end diff --git a/thys/ML_Unification/Unifiers/higher_order_pattern_unification.ML b/thys/ML_Unification/Unifiers/higher_order_pattern_unification.ML --- a/thys/ML_Unification/Unifiers/higher_order_pattern_unification.ML +++ b/thys/ML_Unification/Unifiers/higher_order_pattern_unification.ML @@ -1,476 +1,474 @@ (* Title: ML_Unification/higher_order_pattern_unification.ML Author: Tobias Nipkow, Christine Heinzelmann, Stefan Berghofer, and Kevin Kappelmann TU Muenchen Higher-Order Patterns due to Tobias Nipkow. E-Unification and theorem construction due to Kevin Kappelmann. See also: Tobias Nipkow. Functional Unification of Higher-Order Patterns. In Proceedings of the 8th IEEE Symposium Logic in Computer Science, 1993. 
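As a reminder: a term lies in the higher-order pattern fragment iff each of its schematic variables is applied only to distinct bound variables; e.g. %x y. ?F x y is a pattern, whereas %x. ?F x x and ?F c (for a constant c) are not.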
TODO: optimize red by special-casing it *) signature HIGHER_ORDER_PATTERN_UNIFICATION = sig include HAS_LOGGER (* e-matching *) + (*precondition: types of terms are matched*) val e_match : Unification_Base.type_matcher -> Unification_Base.matcher -> Unification_Base.e_matcher val match : Unification_Base.matcher val norms_match : Unification_Base.normalisers (* e-unification *) + (*precondition: types of terms are unified*) val e_unify : Unification_Base.type_unifier -> Unification_Base.unifier -> Unification_Base.e_unifier val unify : Unification_Base.unifier val norms_unify : Unification_Base.normalisers end structure Higher_Order_Pattern_Unification : HIGHER_ORDER_PATTERN_UNIFICATION = struct val logger = Logger.setup_new_logger Unification_Base.logger "Higher_Order_Pattern_Unification" (* shared utils *) structure Util = Unification_Util structure Norm = Envir_Normalisation exception FALLBACK (*check if sequence is empty or raise FALLBACK exception*) fun seq_try sq = General_Util.seq_try FALLBACK sq (*predicate: downto0 (is, n) <=> is = [n, n - 1, ..., 0]*) fun downto0 ([], n) = n = ~1 | downto0 (i :: is, n) = i = n andalso downto0 (is, n - 1) fun mkabs (binders, is, t) = let fun abstract i acc = let val ((x, T), _) = nth binders i in Abs (x, T, acc) end in fold_rev abstract is t end exception IDX fun idx [] _ = raise IDX | idx (i :: is) j = if i = j then length is else idx is j fun ints_of bs = (*collects arguments and checks if they are all distinct, bound variables*) let fun dest_check (Bound i) acc = if member (op =) acc i then raise Unification_Base.PATTERN else i :: acc | dest_check _ _ = raise Unification_Base.PATTERN in fold_rev dest_check bs [] end fun app (s, []) = s | app (s, (i :: is)) = app (s $ Bound i, is) (* matching *) fun mapbnd f = let fun mpb d (Bound i) = if i < d then Bound i else Bound (f (i - d) + d) | mpb d (Abs (s, T, t)) = Abs (s, T, mpb (d + 1) t) | mpb d (u1 $ u2) = (mpb d u1) $ (mpb d u2) | mpb _ atom = atom in mpb 0 end fun red (Abs (_, _, s)) (i :: is) js = red s is (i :: js) | red t [] [] = t | red t is jn = app (mapbnd (nth jn) t, is) exception OPEN_TERM fun match_bind (tenv, binders, ixn, T, is, t) = let val js = loose_bnos t in if null is then if null js then Vartab.update_new (ixn, (T, t)) tenv else raise OPEN_TERM else if subset (op =) (js, is) then let val t' = if downto0 (is, length binders - 1) then t else mapbnd (idx is) t in Vartab.update_new (ixn, (T, mkabs (binders, is, t'))) tenv end else raise OPEN_TERM end (** higher-order pattern E-unification **) val norms_match = Util.beta_eta_short_norms_match -fun e_match match_types match_theory_pattern match_theory_fail binders ctxt (pt as (p, t)) env = +fun e_match match_types match_theory_pattern match_theory_fail binders ctxt pt env = let val norm_pt = apfst o (#norm_term norms_match) fun rigid_rigid ctxt p n env = (*normalise the types in rigid-rigid cases*) Util.rigid_rigid Norm.norm_term_types_match match_types ctxt p n env |> seq_try handle Unification_Base.UNIF => Seq.empty (*types do not match*) (*generated theorem is already normalised wrt. 
the resulting environment*) fun match binders ctxt (pt as (p, t)) (env as Envir.Envir {maxidx, tenv, tyenv}) = let (*call match_theory on failure*) fun handle_failure match_theory failure_msg = (@{log Logger.DEBUG} ctxt (fn _ => Pretty.breaks [ failure_msg (), Util.pretty_call_theory_match ctxt (norm_pt env pt) ] |> Pretty.block |> Pretty.string_of); match_theory binders ctxt pt env) in (case pt of (Abs (np, _, tp), Abs (nt, Tt, tt)) => let val name = if np = "" then nt else np in Util.abstract_abstract (K I) match binders ctxt name Tt (tp, tt) env |> seq_try end (*eta-expand on the fly*) | (Abs (np, Tp, tp), _) => let val Tp' = Envir.subst_type tyenv Tp in Util.abstract_abstract (K I) match binders ctxt np Tp' (tp, incr_boundvars 1 t $ Bound 0) env |> seq_try end | (_, Abs (nt, Tt, tt)) => Util.abstract_abstract (K I) match binders ctxt nt Tt (incr_boundvars 1 p $ Bound 0, tt) env |> seq_try | (Bound i, Bound j) => Util.bound_bound binders ctxt (i, j) |> Seq.map (pair env) | (Free _, Free g) => rigid_rigid ctxt p g env | (Const _, Const d) => rigid_rigid ctxt p d env | _ => case strip_comb p of (Var (x, T), ps) => let val is = ints_of ps val T' = Norm.norm_type_match tyenv T in case Envir.lookup1 tenv (x, T') of NONE => ((let val tenv' = match_bind (tenv, binders, x, T', is, t) val env' = Envir.Envir {maxidx=maxidx, tenv=tenv', tyenv=tyenv} val t' = Binders.replace_binders binders t in Seq.single (env', Unification_Base.reflexive_term ctxt t') end) handle OPEN_TERM => (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Failed to unify (open term) ", Util.pretty_unif_problem ctxt (norm_pt env pt) ] |> Pretty.string_of); Seq.empty)) | SOME ph' => if Envir.aeconv (red ph' is [], t) then Seq.single (env, Unification_Base.reflexive_term ctxt (Binders.replace_binders binders t)) else raise FALLBACK end | (ph, ps) => let val (th, ts) = strip_comb t in case (ph, th) of (Abs _, _) => raise Unification_Base.PATTERN | (_, Abs _) => raise Unification_Base.PATTERN | _ => if null ps orelse null ts then raise FALLBACK else (*Note: types of theorems are already normalised ==> we pass the identity type normaliser*) Util.strip_comb_strip_comb (K o K I) match binders ctxt (ph, th) (ps, ts) env |> seq_try end) handle FALLBACK => handle_failure match_theory_fail (fn _ => Pretty.str "Higher-order pattern matching failed.") | Unification_Base.PATTERN => handle_failure match_theory_pattern (fn _ => Pretty.str "Terms not in Pattern fragment.") end - val binder_types = Binders.binder_types binders in (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Higher-order pattern matching ", Util.pretty_unif_problem ctxt (norm_pt env pt) ] |> Pretty.string_of); - match_types ctxt (Envir.fastype env binder_types p, fastype_of1 (binder_types, t)) env - |> match binders ctxt pt) + match binders ctxt pt env) handle Unification_Base.UNIF => Seq.empty (*types do not match*) end - (*standard higher-order pattern matching*) val match = e_match Util.match_types Unification_Combinator.fail_match Unification_Combinator.fail_match (*unification*) fun string_of_term ctxt env binders t = (map (Free o fst) binders, t) |> subst_bounds |> Norm.norm_term_unif env |> Syntax.string_of_term ctxt fun bname binders = fst o fst o nth binders fun bnames binders is = map (bname binders) is |> space_implode " " fun proj_fail ctxt (env, binders, F, _, is, t) reason = @{log Logger.DEBUG} ctxt (fn _ => let val f = Term.string_of_vname F val xs = bnames binders is val u = string_of_term ctxt env binders t in cat_lines [ "Cannot 
unify variable " ^ f ^ " (depending on bound variables [" ^ xs ^ "])", "with term " ^ u, reason () ] end) fun proj_fail_unif ctxt (params as (_, binders, _, _, is, t)) = let fun reason () = let val ys = bnames binders (subtract (op =) is (loose_bnos t)) in "Term contains additional bound variable(s) " ^ ys end in proj_fail ctxt params reason end fun proj_fail_type_app ctxt (params as (env, binders, _, _, _, _)) var_app = let fun reason () = let val var_app = string_of_term ctxt env binders var_app in "Projection " ^ var_app ^ " is not well-typed." end in proj_fail ctxt params reason end fun ocheck_fail ctxt (F, t, binders, env) = @{log Logger.DEBUG} ctxt (fn _ => cat_lines [ "Variable " ^ Term.string_of_vname F ^ " occurs in term", string_of_term ctxt env binders t, "Cannot unify!" ]) fun occurs (F, t, env) = let fun occ (Var (G, T)) = (case Envir.lookup env (G, T) of SOME t => occ t | NONE => F = G) | occ (t1 $ t2) = occ t1 orelse occ t2 | occ (Abs (_, _, t)) = occ t | occ _ = false in occ t end fun ints_of' env ts = ints_of (map (Envir.head_norm env) ts) (*split_type ([T1,....,Tn]---> T) n = ([Tn,...,T1], T)*) fun split_type t n = let fun split (T, 0, Ts) = (Ts, T) | split (Type ("fun", [T1, T2]), n, Ts) = split (T2, n - 1, T1 :: Ts) | split _ = raise Fail "split_type" in split (t, n, []) end fun type_of_G (Envir.Envir {tyenv,...}) (T, n, is) = let val (Ts, U) = split_type (Norm.norm_type_unif tyenv T) n in map (nth Ts) is ---> U end fun mk_hnf (binders, is, G, js) = mkabs (binders, is, app (G, js)) fun mk_new_hnf (env, binders, is, F as (a, _), T, js) = let val (env', G) = Envir.genvar a (env, type_of_G env (T, length is, js)) in Envir.update ((F, T), mk_hnf (binders, is, G, js)) env' end (*mk_proj_list is = [ |is| - k - 1 | 0 <= k < |is| and is[k] >= 0 ]*) fun mk_proj_list is = let fun mk (SOME _) (acc, j) = (j :: acc, j + 1) | mk NONE (acc, j) = (acc, j + 1) in fold_rev mk is ([], 0) |> fst end fun proj ctxt (s, env, binders, is) = let fun trans d i = if i < d then i else idx is (i - d) + d fun pr (s, env, d, binders) = (case Envir.head_norm env s of Abs (a, T, t) => let val (binder, _) = Binders.fix_binder (a, T) ctxt val (t', env') = pr (t, env, d + 1, binder :: binders) in (Abs (a, T, t'), env') end | t => (case strip_comb t of (c as Const _, ts) => let val (ts', env') = prs (ts, env, d, binders) in (list_comb (c, ts'), env') end | (f as Free _, ts) => let val (ts', env') = prs (ts, env, d, binders) in (list_comb (f, ts'), env') end | (Bound i, ts) => let val j = trans d i val (ts', env') = prs (ts, env, d, binders) in (list_comb (Bound j, ts'), env') end | (Var (F as (a, _), Fty), ts) => let val js = ints_of' env ts val js' = map (try (trans d)) js val ks = mk_proj_list js' val ls = map_filter I js' val Hty = type_of_G env (Fty, length js, ks) val (env', H) = Envir.genvar a (env, Hty) val env'' = Envir.update ((F, Fty), mk_hnf (binders, js, H, ks)) env' in (app (H, ls), env'') end | _ => raise Unification_Base.PATTERN)) and prs (s :: ss, env, d, binders) = let val (s', env1) = pr (s, env, d, binders) val (ss', env2) = prs (ss, env1, d, binders) in (s' :: ss', env2) end | prs ([], env, _, _) = ([], env) in if downto0 (is, length binders - 1) then (s, env) else pr (s, env, 0, binders) end (*mk_ff_list (is, js) = [ |is| - k - 1 | 0 <= k < |is| and is[k] = js[k] ]*) fun mk_ff_list (is,js) = let fun mk ([], [], _) = [] | mk (i :: is, j :: js, k) = if i = j then k :: mk (is, js, k - 1) else mk (is, js, k - 1) | mk _ = raise Fail "mk_ff_list" in mk (is, js, length is - 1) end; fun 
app_free (Envir.Envir {tyenv,...}) binders n T is = let val norm_type = Norm.norm_type_unif tyenv in list_comb (Var (n, norm_type T), map (map_types norm_type o Binders.nth_binder_data binders) is) end fun flexflex1 unify_types ctxt (env, binders, F, Fty, Fty', is, js) = let val env' = unify_types ctxt (Fty, Fty') env val thm = app_free env binders F Fty is |> Unification_Base.reflexive_term ctxt in if is = js then (env', thm) else let val ks = mk_ff_list (is, js) val env'' = mk_new_hnf (env', binders, is, F, Fty, ks) in (env'', thm) end end fun flexflex2 unify_types ctxt (env, binders, F, Fty, is, G, Gty, js) = let val var_app = app_free env binders F Fty is val binder_types = Binders.binder_types binders val env' = unify_types ctxt (apply2 (Envir.fastype env binder_types) (var_app, app (Var (G, Gty), js))) env val thm = Unification_Base.reflexive_term ctxt var_app fun ff (F, Fty, is, G as (a, _), Gty, js) = if subset (op =) (js, is) then let val t = mkabs (binders, is, app (Var (G, Gty), map (idx is) js)) val env'' = Envir.update ((F, Fty), t) env' in (env'', thm) end else let val ks = inter (op =) js is val Hty = type_of_G env' (Fty, length is, map (idx is) ks) val (env'', H) = Envir.genvar a (env', Hty) fun lam is = mkabs (binders, is, app (H, map (idx is) ks)) val env''' = Envir.update ((F, Fty), lam is) env'' |> Envir.update ((G, Gty), lam js) in (env''', thm) end in if is_less (Term_Ord.indexname_ord (G, F)) then ff (F, Fty, is, G, Gty, js) else ff (G, Gty, js, F, Fty, is) end fun flexrigid unify_types ctxt (params as (env, binders, F, Fty, is, t)) = if occurs (F, t, env) then (ocheck_fail ctxt (F, t, binders, env); raise FALLBACK) else let val var_app = app_free env binders F Fty is in let val binder_types = Binders.binder_types binders val env' = unify_types ctxt (apply2 (Envir.fastype env binder_types) (var_app, t)) env val (u, env'') = proj ctxt (t, env', binders, is) val env''' = Envir.update ((F, Fty), mkabs (binders, is, u)) env'' val thm = Unification_Base.reflexive_term ctxt var_app in (env''', thm) end handle IDX => (proj_fail_unif ctxt params; raise OPEN_TERM) | TYPE _ => (proj_fail_type_app ctxt params var_app; raise Unification_Base.UNIF) end (** higher-order pattern E-unification **) val norms_unify = Util.beta_eta_short_norms_unif fun e_unify unify_types unify_theory_pattern unify_theory_fail binders ctxt st env = let fun unif binders ctxt st env = let val (st' as (s', t')) = apply2 (Envir.head_norm env) st fun rigid_rigid ctxt t n env = (*we do not normalise types in base cases*) Util.rigid_rigid (K I) unify_types ctxt t n env |> seq_try handle Unification_Base.UNIF => Seq.empty (*types do not match*) (*call unify_theory on failure*) fun handle_failure unify_theory failure_msg = (@{log Logger.DEBUG} ctxt (fn _ => Pretty.breaks [ failure_msg (), Util.pretty_call_theory_unif ctxt st' ] |> Pretty.block |> Pretty.string_of); (*Note: st' is already normalised*) unify_theory binders ctxt st' env) in (case st' of (Abs (ns, Ts, ts), Abs (nt, Tt, tt)) => ((let val name = if ns = "" then nt else ns in unify_types ctxt (Ts, Tt) env |> Util.abstract_abstract Norm.norm_term_types_unif unif binders ctxt name Ts (ts, tt) |> seq_try end) handle Unification_Base.UNIF => Seq.empty) (*types do not unify*) (*eta-expand on the fly*) | (Abs (ns, Ts, ts), _) => ((let val st = apply2 (Envir.fastype env (Binders.binder_types binders)) st' in unify_types ctxt st env |> Util.abstract_abstract Norm.norm_term_types_unif unif binders ctxt ns Ts (ts, incr_boundvars 1 t' $ Bound 0) |> seq_try end) 
handle Unification_Base.UNIF => Seq.empty) (*types do not unify*) | (_, Abs _) => unif binders ctxt (t', s') env |> Seq.map (apsnd Unification_Base.symmetric) | (Bound i, Bound j) => Util.bound_bound binders ctxt (i, j) |> Seq.map (pair env) | (Free _, Free g) => rigid_rigid ctxt s' g env | (Const _, Const d) => rigid_rigid ctxt s' d env (*case distinctions on head term*) | _ => case apply2 strip_comb st' of ((Var (F, Fty), ss), (Var (G, Gty), ts)) => (((if F = G then flexflex1 unify_types ctxt (env, binders, F, Fty, Gty, ints_of' env ss, ints_of' env ts) else flexflex2 unify_types ctxt (env, binders, F, Fty, ints_of' env ss, G, Gty, ints_of' env ts)) |> Seq.single) handle Unification_Base.UNIF => Seq.empty) (*types do not unify*) | ((Var (F, Fty), ss), _) => (flexrigid unify_types ctxt (env, binders, F, Fty, ints_of' env ss, t')|> Seq.single handle OPEN_TERM => Seq.empty | Unification_Base.UNIF => Seq.empty) (*types do not unify*) | (_, (Var (F, Fty), ts)) => (flexrigid unify_types ctxt (env, binders, F, Fty, ints_of' env ts, s') |> Seq.single handle OPEN_TERM => Seq.empty | Unification_Base.UNIF => Seq.empty) (*types do not unify*) | ((sh, ss), (th, ts)) => if null ss orelse null ts then raise FALLBACK else (*but we have to normalise in comb cases*) Util.strip_comb_strip_comb Norm.norm_thm_types_unif unif binders ctxt (sh, th) (ss, ts) env |> seq_try) handle FALLBACK => handle_failure unify_theory_fail (fn _ => Pretty.str "Higher-order pattern unification failed.") | Unification_Base.PATTERN => handle_failure unify_theory_pattern (fn _ => Pretty.str "Terms not in Pattern fragment.") end - val binder_types = Binders.binder_types binders in (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Higher-order pattern unifying ", Util.pretty_unif_problem ctxt (apply2 (Norm.norm_term_unif env) st) ] |> Pretty.string_of); - unif binders ctxt st (unify_types ctxt (apply2 (Envir.fastype env binder_types) st) env)) + unif binders ctxt st env) handle Unification_Base.UNIF => Seq.empty (*types do not unify*) end (*standard higher-order pattern unification*) val unify = e_unify Util.unify_types Unification_Combinator.fail_unify Unification_Combinator.fail_unify end diff --git a/thys/ML_Unification/Unifiers/higher_order_unification.ML b/thys/ML_Unification/Unifiers/higher_order_unification.ML --- a/thys/ML_Unification/Unifiers/higher_order_unification.ML +++ b/thys/ML_Unification/Unifiers/higher_order_unification.ML @@ -1,60 +1,53 @@ (* Title: ML_Unification/higher_order_unification.ML Author: Kevin Kappelmann TU Muenchen Higher-order unification from the Pure kernel adjusted to fit the Unification_Base.unifier type, i.e. moving flex-flex pairs into the theorem and support of open term unification. 
*) signature HIGHER_ORDER_UNIFICATION = sig include HAS_LOGGER val unify : Unification_Base.unifier val norms : Unification_Base.normalisers end structure Higher_Order_Unification : HIGHER_ORDER_UNIFICATION = struct val logger = Logger.setup_new_logger Unification_Base.logger "Higher_Order_Unification" structure Util = Unification_Util -fun unify binders ctxt tp env = - let - fun unif env = - let - val init_goal = Logic.mk_equals #> Thm.cterm_of ctxt #> Goal.init - fun create_env_thmq env = - (*replace binders and apply the computed unifier*) - apply2 (Envir_Normalisation.beta_eta_short_norm_term_unif env o Binders.replace_binders binders) tp - (*create a goal such that we can create a theorem including the flex-flex pairs*) - |> init_goal - (*kind of a hack: resolving against the equality theorem will add the - flex-flex pairs to the theorem*) - |> HEADGOAL (match_tac ctxt @{thms Pure.reflexive}) - (*conclude the goal and pair with the environment*) - |> Seq.map (pair env o Goal.conclude) - in - (*find the unifiers*) - Unify.hounifiers (map fst binders) (Context.Proof ctxt, env, [tp]) - (*add the flex-flex pairs to the theorem*) - |> Seq.maps (create_env_thmq o fst) - end - in - ((@{log Logger.DEBUG} ctxt (fn _ => - Pretty.block [ - Pretty.str "Higher-order unifying ", - Util.pretty_unif_problem ctxt (apply2 (Envir.norm_term env) tp) - ] - |> Pretty.string_of); - Util.unify_types ctxt (apply2 (Envir.fastype env (Binders.binder_types binders)) tp) env - |> unif) - handle Unification_Base.UNIF => Seq.empty) (*types do not match*) - end +val unify = + let fun unif binders ctxt tp env = + let + val _ = @{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + Pretty.str "Higher-order unifying ", + Util.pretty_unif_problem ctxt (apply2 (Envir.norm_term env) tp) + ] |> Pretty.string_of) + val init_goal = Logic.mk_equals #> Thm.cterm_of ctxt #> Goal.init + fun create_env_thmq env = + (*replace binders and apply the computed unifier*) + apply2 (Envir_Normalisation.beta_eta_short_norm_term_unif env o Binders.replace_binders binders) tp + (*create a goal such that we can create a theorem including the flex-flex pairs*) + |> init_goal + (*kind of a hack: resolving against the equality theorem will add the + flex-flex pairs to the theorem*) + |> HEADGOAL (match_tac ctxt @{thms Pure.reflexive}) + (*conclude the goal and pair with the environment*) + |> Seq.map (pair env o Goal.conclude) + in + (*find the unifiers*) + Unify.hounifiers (map fst binders) (Context.Proof ctxt, env, [tp]) + (*add the flex-flex pairs to the theorem*) + |> Seq.maps (create_env_thmq o fst) + end + in Type_Unification.e_unify Util.unify_types unif end val norms = Unification_Util.beta_eta_short_norms_unif end diff --git a/thys/ML_Unification/Unifiers/mixed_unification.ML b/thys/ML_Unification/Unifiers/mixed_unification.ML --- a/thys/ML_Unification/Unifiers/mixed_unification.ML +++ b/thys/ML_Unification/Unifiers/mixed_unification.ML @@ -1,55 +1,55 @@ (* Title: ML_Unification/mixed_unification.ML Author: Kevin Kappelmann Mixture of unification algorithms. 
*) signature MIXED_UNIFICATION = sig include HAS_LOGGER structure UC : UNIFICATION_COMBINE (*first-order, then higher-order pattern with decomposition, then UC.e_unify, then higher-order unification fallback*) val first_higherp_decomp_comb_higher_unify : Unification_Base.unifier val norms_first_higherp_decomp_comb_higher_unify : Unification_Base.normalisers end functor Mixed_Unification (A : sig structure FIA : FUNCTOR_INSTANCE_ARGS structure UC : UNIFICATION_COMBINE end) : MIXED_UNIFICATION = struct val logger = Logger.setup_new_logger Unification_Base.logger A.FIA.full_name structure UUtil = Unification_Util structure UCO = Unification_Combinator structure UC = A.UC fun first_higherp_decomp_comb_higher_unify binders ctxt tp env = let val unify_types = UUtil.unify_types val comb_higher = UCO.add_fallback_unifier UC.e_unify Higher_Order_Unification.unify val decomp_comb_higher = UCO.add_fallback_unifier (Higher_Order_Pattern_Decomp_Unification.e_unify first_higherp_decomp_comb_higher_unify) comb_higher val higherp_decomp_comb_higher = UCO.add_fallback_unifier - (Higher_Order_Pattern_Unification.e_unify unify_types decomp_comb_higher) - comb_higher + (Higher_Order_Pattern_Unification.e_unify unify_types decomp_comb_higher) + comb_higher + |> Type_Unification.e_unify unify_types val fo_higherp_decomp_comb_higher = UCO.add_fallback_unifier - (First_Order_Unification.e_unify unify_types) - higherp_decomp_comb_higher + (First_Order_Unification.e_unify unify_types) higherp_decomp_comb_higher in (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "First-order with higher-order pattern with decomposition with ", Binding.pretty UC.binding, Pretty.str " with higher-order fallback unifying ", UUtil.pretty_unif_problem ctxt (apply2 (Envir_Normalisation.norm_term_unif env) tp)] |> Pretty.string_of); fo_higherp_decomp_comb_higher binders ctxt tp env) end val norms_first_higherp_decomp_comb_higher_unify = UUtil.beta_eta_short_norms_unif end diff --git a/thys/ML_Unification/Unifiers/simplifier_unification.ML b/thys/ML_Unification/Unifiers/simplifier_unification.ML --- a/thys/ML_Unification/Unifiers/simplifier_unification.ML +++ b/thys/ML_Unification/Unifiers/simplifier_unification.ML @@ -1,75 +1,115 @@ (* Title: ML_Unification/simplifier_unification.ML Author: Kevin Kappelmann Solving equations for unification problems with the simplifier. *) signature SIMPLIFIER_UNIFICATION = sig include HAS_LOGGER - (*solves "s \\<^sup>? t" via simplification*) - val simp_unify : Unification_Base.closed_unifier - (*solves "SIMPS_TO s t \ rhs" via simplification of s when given a matching theorem + (*solves "SIMPS_TO s t \ rhs" via simplification of s when given a theorem "SIMPS_TO s t \ SIMPS_TO s t \ rhs"*) val SIMPS_TO_unify : thm -> Unification_Base.closed_unifier (*solves "SIMPS_TO_UNIF s t \ rhs" via simplification of s to s', followed by unification of - "s' \\<^sup>? t", when given a matching theorem "SIMPS_TO_UNIF s t \ SIMPS_TO_UNIF s t \ rhs"*) - val SIMPS_TO_UNIF_unify : thm -> Unification_Base.normalisers -> Unification_Base.unifier -> + "s' \\<^sup>? t", when given a theorem "SIMPS_TO_UNIF s t \ SIMPS_TO_UNIF s t \ rhs"*) + val SIMPS_TO_UNIF_unify : thm -> Unification_Base.normalisers -> + Unification_Base.closed_unifier -> Unification_Base.closed_unifier + (*solves "s \\<^sup>? t" via simplification followed by unification*) + val simp_unify : Unification_Base.normalisers -> Unification_Base.closed_unifier -> Unification_Base.closed_unifier + (*solves "s \\<^sup>? 
t" via simplification followed by unification; aborts if no progress was made + during simplification*) + val simp_unify_progress : (term * term -> bool) -> (Unification_Base.normalisers -> + Unification_Base.closed_unifier -> Unification_Base.closed_unifier) -> + Envir_Normalisation.term_normaliser -> Unification_Base.normalisers -> + Unification_Base.e_unifier end structure Simplifier_Unification : SIMPLIFIER_UNIFICATION = struct val logger = Logger.setup_new_logger Unification_Base.logger "Simplifier_Unification" -(*some "safe" solvers create instantiations via flex-flex pairs, which we disallow*) -val safe_simp_tac = Tactic_Util.safe_simp_tac +structure Util = Tactic_Util -fun simp_unify ctxt tp = - (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ - Pretty.str "Solving unification problem via simplification ", - Unification_Util.pretty_unif_problem ctxt tp - ] |> Pretty.string_of); - Tactic_Unification.unify (Tactic_Unification.env_tac_from_no_inst_tac (safe_simp_tac ctxt) |> K) - ctxt tp) +(*some "safe" solvers create instantiations via flex-flex pairs, which we disallow*) +val safe_simp_tac = Util.safe_simp_tac -fun preprocess_tac ctxt = match_tac ctxt o single +fun match_tac ctxt = Tactic.match_tac ctxt o single fun SIMPS_TO_unify preprocess_rule ctxt = let fun tac (tp as (lhs, _)) = if can Simps_To.dest_SIMPS_TO lhs then (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Solving ", Syntax.pretty_term ctxt @{term SIMPS_TO}, Pretty.str " unification problem ", Unification_Util.pretty_unif_problem ctxt tp ] |> Pretty.string_of); - preprocess_tac ctxt preprocess_rule - THEN' safe_simp_tac ctxt - THEN' Simps_To.finish_SIMPS_TO_tac ctxt) + match_tac ctxt preprocess_rule + THEN' Simps_To.SIMPS_TO_tac (safe_simp_tac ctxt) ctxt) else K no_tac in Tactic_Unification.unify (Tactic_Unification.env_tac_from_no_inst_tac o tac) ctxt end +fun SIMPS_TO_UNIF_env_thm_tac norms unif ctxt env tSIMPS_TO_UNIF state = + let + val tp = Simps_To_Unif.dest_SIMPS_TO_UNIF tSIMPS_TO_UNIF + fun norm_resolve (env, thm) = #norm_thm norms ctxt env state |> Thm.elim_implies thm |> pair env + in + Simps_To_Unif.SIMPS_TO_UNIF_env_thmsq (safe_simp_tac ctxt) norms unif ctxt tp env + |> Seq.map norm_resolve + end + fun SIMPS_TO_UNIF_unify preprocess_rule norms unif ctxt = - let fun tac ctxt (tp as (lhs, _)) i (env, state) = - (let - val simps_to_tp = Simps_To_Unif.dest_SIMPS_TO_UNIF lhs - fun solve_tac state = - Simps_To_Unif.SIMPS_TO_UNIF_env_thmsq (safe_simp_tac ctxt) norms unif [] ctxt - simps_to_tp env - |> Seq.map (fn (env, thm) => (env, #norm_thm norms ctxt env state |> Thm.elim_implies thm)) - in + let fun tac (tp as (lhs, _)) i (env, state) = + if can Simps_To_Unif.dest_SIMPS_TO_UNIF lhs + then (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ Pretty.str "Solving ", Syntax.pretty_term ctxt @{term SIMPS_TO_UNIF}, Pretty.str " unification problem ", Unification_Util.pretty_unif_problem ctxt tp ] |> Pretty.string_of); - preprocess_tac ctxt preprocess_rule i state - |> Seq.maps solve_tac) - end) - handle TERM _ => Seq.empty - in Tactic_Unification.unify (tac ctxt) ctxt end + match_tac ctxt preprocess_rule i state + |> Seq.maps (SIMPS_TO_UNIF_env_thm_tac norms unif ctxt env lhs)) + else Seq.empty + in Tactic_Unification.unify tac ctxt end + +fun simp_unify norms unif ctxt = + let + val simp_tac = safe_simp_tac ctxt + fun SIMPS_TO_UNIF_env_thm_tac' env i state = SIMPS_TO_UNIF_env_thm_tac norms unif ctxt env + (Thm.cprem_of state i |> Thm.term_of) state + fun eq_tac tp i (env, state) = + (@{log Logger.DEBUG} 
ctxt (fn _ => Pretty.block [ + Pretty.str "Solving unification problem via simplification followed by unification ", + Unification_Util.pretty_unif_problem ctxt tp + ] |> Pretty.string_of); + Util.THEN' ( + match_tac ctxt @{thm eq_if_SIMPS_TO_UNIF_if_SIMPS_TO} + THEN' Simps_To.SIMPS_TO_tac (Simps_To.simp_inst_tac simp_tac) ctxt, + SIMPS_TO_UNIF_env_thm_tac' env) i state) + in Tactic_Unification.unify eq_tac ctxt end + +fun simp_unify_progress teq simp_unify norm_term norms unif binders ctxt (tp as (lhs, rhs)) = + let fun unify ctxt tp' = + let val tp' as (lhs', rhs') = apply2 (Binders.replace_frees binders) tp' + in + (if teq (lhs, lhs') andalso teq (rhs, rhs') + then (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + Pretty.str "Simplification of ", + Unification_Util.pretty_unif_problem ctxt tp, + Pretty.str " failed (no progress)" + ] |> Pretty.string_of); + Unification_Combinator.fail_unify) + else unif) + binders ctxt tp' + end + in + simp_unify norms unify + |> Unification_Combinator.norm_closed_unifier norm_term + |> Unification_Combinator.replace_binders_closed_unifier + |> (fn unif => unif binders ctxt tp) + end end diff --git a/thys/ML_Unification/Unifiers/tactic_unification.ML b/thys/ML_Unification/Unifiers/tactic_unification.ML --- a/thys/ML_Unification/Unifiers/tactic_unification.ML +++ b/thys/ML_Unification/Unifiers/tactic_unification.ML @@ -1,29 +1,38 @@ (* Title: ML_Unification/tactic_unification.ML Author: Kevin Kappelmann Solving equations for unification problems with environment-aware tactics. *) signature TACTIC_UNIFICATION = sig + include HAS_LOGGER + type env_tactic = Envir.env * thm -> (Envir.env * thm) Seq.seq (*create environment-aware tactic from a tactic that *DOES NOT* instantiate meta variables*) val env_tac_from_no_inst_tac : (int -> tactic) -> int -> env_tactic val unify : (term * term -> int -> env_tactic) -> Unification_Base.closed_unifier end structure Tactic_Unification : TACTIC_UNIFICATION = struct +val logger = Logger.setup_new_logger Unification_Base.logger "Tactic_Unification" + type env_tactic = Envir.env * thm -> (Envir.env * thm) Seq.seq fun env_tac_from_no_inst_tac tac i (env, state) = tac i state |> Seq.map (pair env) -fun unify tac ctxt tp env = +fun unify tac ctxt (tp as (lhs, rhs)) env = (Logic.mk_equals tp |> Thm.cterm_of ctxt |> Goal.init |> Tactic_Util.HEADGOAL (tac tp) o pair env |> Seq.map_filter (try (apsnd (Goal.finish ctxt)))) - handle TYPE _ => Seq.empty + handle TYPE _ => (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [ + Pretty.str "Types of terms ", + Unification_Util.pretty_terms ctxt [lhs, rhs], + Pretty.str " not equal" + ] |> Pretty.string_of); + Seq.empty) end diff --git a/thys/ML_Unification/Unifiers/type_unification.ML b/thys/ML_Unification/Unifiers/type_unification.ML new file mode 100644 --- /dev/null +++ b/thys/ML_Unification/Unifiers/type_unification.ML @@ -0,0 +1,48 @@ +(* Title: ML_Unification/type_unification.ML + Author: Kevin Kappelmann +*) +signature TYPE_UNIFICATION = +sig + include HAS_LOGGER + + (*matches/unifies types of given terms and passes unification problem with updated environment + to the fallback matcher/unifier*) + val e_match : Unification_Base.type_matcher -> Unification_Base.e_matcher + val e_unify : Unification_Base.type_unifier -> Unification_Base.e_unifier +end + +structure Type_Unification : TYPE_UNIFICATION = +struct + +val logger = Logger.setup_new_logger Unification_Base.logger "Type_Unification" + +structure Norm = Envir_Normalisation +structure Util = Unification_Util + +fun e_match 
match_types match_theory binders ctxt (pt as (p, t)) env =
+  let val binder_types = Binders.binder_types binders
+  in
+    (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [
+        Pretty.str "Matching types of ",
+        Util.pretty_unif_problem ctxt pt
+      ] |> Pretty.string_of);
+    match_types ctxt (Envir.fastype env binder_types p, fastype_of1 (binder_types, t)) env
+    |> match_theory binders ctxt pt)
+    handle Unification_Base.UNIF => Seq.empty
+  end
+
+fun e_unify unif_types unif_theory binders ctxt st env =
+  let val binder_types = Binders.binder_types binders
+  in
+    (@{log Logger.DEBUG} ctxt (fn _ =>
+      Pretty.block [
+        Pretty.str "Unifying types of ",
+        Util.pretty_unif_problem ctxt (apply2 (Norm.norm_term_unif env) st)
+      ]
+      |> Pretty.string_of);
+    unif_types ctxt (apply2 (Envir.fastype env binder_types) st) env
+    |> unif_theory binders ctxt st)
+    handle Unification_Base.UNIF => Seq.empty
+  end
+
+end
diff --git a/thys/ML_Unification/Unifiers/unification_combinator.ML b/thys/ML_Unification/Unifiers/unification_combinator.ML
--- a/thys/ML_Unification/Unifiers/unification_combinator.ML
+++ b/thys/ML_Unification/Unifiers/unification_combinator.ML
@@ -1,108 +1,132 @@
(*  Title: ML_Unification/unification_combinator.ML
    Author: Kevin Kappelmann

Unification combinators.
*)
signature UNIFICATION_COMBINATOR =
sig
  (* failures *)
  val fail_closed_unify : Unification_Base.closed_unifier
  val fail_unify : Unification_Base.unifier
  val fail_e_unify : Unification_Base.e_unifier
  val fail_closed_match : Unification_Base.closed_matcher
  val fail_match : Unification_Base.matcher
  val fail_e_match : Unification_Base.e_matcher

  (* fallbacks *)
  (*"add_fallback_e_unifier u1 u2 u" creates a unifier that calls u1 with fallback unifier "u2 u"*)
  val add_fallback_e_unifier : Unification_Base.e_unifier -> Unification_Base.e_unifier ->
    Unification_Base.e_unifier
  (*"add_fallback_unifier u1 u2" creates a unifier that calls u1 with fallback unifier u2*)
  val add_fallback_unifier : Unification_Base.e_unifier -> Unification_Base.unifier ->
    Unification_Base.unifier

  val add_fallback_e_matcher : Unification_Base.e_matcher -> Unification_Base.e_matcher ->
    Unification_Base.e_matcher
  val add_fallback_matcher : Unification_Base.e_matcher -> Unification_Base.matcher ->
    Unification_Base.matcher

  (* sequential *)
  (*try all unifiers in sequence*)
  val concat_closed_unifiers : Unification_Base.closed_unifier list ->
    Unification_Base.closed_unifier
  val concat_closed_matchers : Unification_Base.closed_matcher list ->
    Unification_Base.closed_matcher
  val concat_unifiers : Unification_Base.unifier list -> Unification_Base.unifier
  val concat_matchers : Unification_Base.matcher list -> Unification_Base.matcher
  val concat_e_unifiers : Unification_Base.e_unifier list -> Unification_Base.e_unifier
  val concat_e_matchers : Unification_Base.e_matcher list -> Unification_Base.e_matcher

  (* normalisation *)
  (*apply normaliser before calling the matcher/unifier*)
  val norm_closed_unifier : Envir_Normalisation.term_normaliser -> Unification_Base.closed_unifier ->
    Unification_Base.closed_unifier
  val norm_closed_matcher : Envir_Normalisation.term_normaliser -> Unification_Base.closed_matcher ->
    Unification_Base.closed_matcher
  val norm_unifier : Envir_Normalisation.term_normaliser -> Unification_Base.unifier ->
    Unification_Base.unifier
  val norm_matcher : Envir_Normalisation.term_normaliser -> Unification_Base.matcher ->
    Unification_Base.matcher

-  (* miscellaneous *)
+  (* binder replacements *)
+  val replace_binders_closed_unifier : Unification_Base.closed_unifier -> Unification_Base.unifier
+  val replace_binders_closed_matcher : Unification_Base.closed_matcher -> Unification_Base.matcher
+  val replace_binders_unifier : Unification_Base.unifier -> Unification_Base.unifier
+  val replace_binders_matcher : Unification_Base.matcher -> Unification_Base.matcher
+
+  val replace_frees_closed_unifier : Unification_Base.closed_unifier -> Unification_Base.unifier
+  val replace_frees_closed_matcher : Unification_Base.closed_matcher -> Unification_Base.matcher
+  val replace_frees_unifier : Unification_Base.unifier -> Unification_Base.unifier
+  val replace_frees_matcher : Unification_Base.matcher -> Unification_Base.matcher
+
  (*approximates a unifier (working with bound variables) from a closed unifier
    by replacing all bound variables with their associated free variables*)
  val unifier_from_closed_unifier : Unification_Base.closed_unifier -> Unification_Base.unifier
  val matcher_from_closed_matcher : Unification_Base.closed_matcher -> Unification_Base.matcher
end

structure Unification_Combinator : UNIFICATION_COMBINATOR =
struct

structure UB = Unification_Base

(* failures *)
val fail_closed_unify = K o K o K Seq.empty
val fail_unify = K fail_closed_unify
val fail_e_unify = K fail_unify

val fail_closed_match = fail_closed_unify
val fail_match = fail_unify
val fail_e_match = fail_e_unify

(* fallbacks *)
fun add_fallback_e_unifier e_unif1 e_unif2 = e_unif1 o e_unif2
fun add_fallback_unifier e_unif1 e_unif2 = add_fallback_e_unifier e_unif1 (K e_unif2) fail_unify

val add_fallback_e_matcher = add_fallback_e_unifier
fun add_fallback_matcher e_match1 e_match2 = add_fallback_e_matcher e_match1 (K e_match2) fail_match

(* sequential *)
fun concat_closed_unifiers unifiers ctxt tp =
  fold_rev (fn unify => curry Seq.APPEND (unify ctxt tp)) unifiers Seq.fail
val concat_closed_matchers = concat_closed_unifiers

fun concat_unifiers unifiers binders ctxt tp =
  fold_rev (fn unify => curry Seq.APPEND (unify binders ctxt tp)) unifiers Seq.fail
val concat_matchers = concat_unifiers

fun concat_e_unifiers unifiers unif binders ctxt tp =
  fold_rev (fn unify => curry Seq.APPEND (unify unif binders ctxt tp)) unifiers Seq.fail
val concat_e_matchers = concat_e_unifiers

(* normalisation *)
fun norm_closed_unifier norm_t unif ctxt tp env = unif ctxt (apply2 (norm_t env) tp) env

fun norm_closed_matcher norm_p match ctxt (p, t) env = match ctxt (norm_p env p, t) env

fun norm_unifier norm_t unif binders ctxt tp env =
  norm_closed_unifier norm_t (unif (Binders.norm_binders (norm_t env) binders)) ctxt tp env

fun norm_matcher norm_p match binders ctxt pt env =
  norm_closed_matcher norm_p (match (Binders.norm_binders (norm_p env) binders)) ctxt pt env

+(* binder replacements *)
+fun replace_binders_closed_unifier unif binders ctxt =
+  unif ctxt o apply2 (Binders.replace_binders binders)
+val replace_binders_closed_matcher = replace_binders_closed_unifier
+
+fun replace_binders_unifier unif binders = replace_binders_closed_unifier (unif binders) binders
+fun replace_binders_matcher match binders = replace_binders_closed_matcher (match binders) binders
+
+fun replace_frees_closed_unifier unif binders ctxt =
+  unif ctxt o apply2 (Binders.replace_frees binders)
+val replace_frees_closed_matcher = replace_frees_closed_unifier
+
+fun replace_frees_unifier unif binders = replace_frees_closed_unifier (unif binders) binders
+fun replace_frees_matcher match binders = replace_frees_closed_matcher (match binders) binders
+
(* miscellaneous *)
-fun unifier_from_closed_unifier unify binders ctxt =
-  unify ctxt o apply2 (Binders.replace_binders binders)
-val matcher_from_closed_matcher = unifier_from_closed_unifier
+val unifier_from_closed_unifier = replace_binders_closed_unifier
+val matcher_from_closed_matcher = replace_binders_closed_matcher

end
diff --git a/thys/ML_Unification/Unifiers/var_higher_order_pattern_unification.ML b/thys/ML_Unification/Unifiers/var_higher_order_pattern_unification.ML
--- a/thys/ML_Unification/Unifiers/var_higher_order_pattern_unification.ML
+++ b/thys/ML_Unification/Unifiers/var_higher_order_pattern_unification.ML
@@ -1,70 +1,73 @@
(*  Title: ML_Unification/var_higher_order_pattern_unification.ML
    Author: Kevin Kappelmann

Unifies terms of the form ?H b1 ... bn \\<^sup>? t where
(1) b1,...,bn are bound variables and
(2) ?H does not occur in t
with the higher-order pattern matcher.

This pattern occurs frequently when unifying theorems with a goal containing bound variables.
Example: If we want to solve the goal "\x. ?P x 0" with a lemma ?Q by resolution,
the bound variables of the goal are first lifted to lemma, resulting in "?Q x".
Next, the terms "?Q x \\<^sup>? ?P x 0" are unified.
Unfortunately, this problem falls outside the higher-order pattern fragment since 0 is a constant.
However, the problem seems essentially first-order before lifting the bound variables:
we should be able to use ?Q to solve any goal G by setting ?Q to G.
The unifier of this file returns the expected substitution "?Q := \x. ?P x 0" in this case.
*)
signature VAR_HIGHER_ORDER_PATTERN_UNIFICATION =
sig
  include HAS_LOGGER
  val can_var_pattern_unif : term * term -> bool * term list
  val e_unify : Unification_Base.e_unifier
end

structure Var_Higher_Order_Pattern_Unification : VAR_HIGHER_ORDER_PATTERN_UNIFICATION =
struct

val logger = Logger.setup_new_logger Unification_Base.logger "Var_Higher_Order_Pattern_Unification"

structure Util = Unification_Util

fun can_var_pattern_unif (p, t) =
  let val (head, args) = strip_comb p
  in
    if is_Var head andalso forall is_Bound args
      andalso not (has_duplicates (op aconv) args)
      andalso not (Term.exists_subterm (curry (op =) head) t)
    then (true, args)
    else (false, [])
  end

fun e_unify unif_theory binders ctxt (tp as (t, p)) env =
  let
    exception FALLBACK of Pretty.T
-    val hop_match_type_unif = Higher_Order_Pattern_Unification.e_match Util.unify_types
-      Unification_Combinator.fail_match Unification_Combinator.fail_match binders ctxt
+    val unify_types = Util.unify_types
+    val hop_match_type_unif = Higher_Order_Pattern_Unification.e_match unify_types
+        Unification_Combinator.fail_match Unification_Combinator.fail_match
+      |> Type_Unification.e_unify unify_types
+      |> (fn unif => unif binders ctxt)
    val hop_match_type_unif_tp = hop_match_type_unif tp
    val hop_match_type_unif_pt = hop_match_type_unif (p, t)
      #> Seq.map (apsnd Unification_Base.symmetric)
    val seq_try = General_Util.seq_try (FALLBACK (Pretty.str "Higher-order pattern matching failed."))
  in
    (@{log Logger.DEBUG} ctxt (fn _ => Pretty.block [
        Pretty.str "Variable head higher-order pattern unifying ",
        Util.pretty_unif_problem ctxt tp
      ] |> Pretty.string_of);
    case apply2 can_var_pattern_unif (tp, (p, t)) of
      ((true, boundst), (true, boundsp)) =>
        if length boundst >= length boundsp
        then hop_match_type_unif_tp env |> seq_try
        else hop_match_type_unif_pt env |> seq_try
    | ((true, _), _) => hop_match_type_unif_tp env |> seq_try
    | (_, (true, _)) => hop_match_type_unif_pt env |> seq_try
    | _ => raise FALLBACK (Pretty.str "Terms not of the form ?H b1 ... bn \\<^sup>? t where ?H does not occur in t."))
    handle FALLBACK msg =>
      (@{log Logger.DEBUG} ctxt (fn _ =>
        Pretty.breaks [
          msg,
          Util.pretty_call_theory_unif ctxt tp
        ] |> Pretty.block |> Pretty.string_of);
      unif_theory binders ctxt tp env)
  end

end
diff --git a/thys/ML_Unification/unification_base.ML b/thys/ML_Unification/unification_base.ML
--- a/thys/ML_Unification/unification_base.ML
+++ b/thys/ML_Unification/unification_base.ML
@@ -1,74 +1,77 @@
(*  Title: ML_Unification/unification_base.ML
    Author: Kevin Kappelmann

Basic definitions for unifiers.
*)
signature UNIFICATION_BASE =
sig
  include HAS_LOGGER

  val reflexive : cterm -> thm
  val combination : thm -> thm -> thm
  val symmetric : thm -> thm
  val abstract_rule : Proof.context -> string -> cterm -> thm -> thm option
  val reflexive_term : Proof.context -> term -> thm

  (*raised on unsupported input*)
  exception PATTERN

  (*raised on unification failure for non-sequence outputs (e.g. type unification)*)
  exception UNIF

  type type_unifier = Proof.context -> typ * typ -> Envir.env -> Envir.env
  type type_matcher = type_unifier

  type closed_unifier = Proof.context -> term * term -> Envir.env -> (Envir.env * thm) Seq.seq
  (*term binders stores fresh free variables associated to each loose bound variable*)
  type unifier = term Binders.binders -> closed_unifier
  type e_unifier = unifier -> unifier

  type closed_matcher = closed_unifier
  type matcher = unifier
  type e_matcher = matcher -> matcher

  (* normalisers for matchers/unifiers *)
  type normalisers = {
+    (*normaliser for result of unifier*)
    norm_unif_thm : Envir_Normalisation.thm_normaliser,
+    (*normaliser for other theorems*)
    norm_thm : Envir_Normalisation.thm_normaliser,
+    (*normaliser for terms*)
    norm_term : Envir_Normalisation.term_normaliser
  }
end

structure Unification_Base : UNIFICATION_BASE =
struct

-val logger = Logger.setup_new_logger Logger.root_logger "Unification_Base"
+val logger = Logger.setup_new_logger Logger.root "Unification_Base"

val reflexive = Thm.reflexive
val combination = Thm.combination
val symmetric = Thm.symmetric

val abstract_rule = Thm_Util.abstract_rule

val reflexive_term = reflexive oo Thm.cterm_of

exception PATTERN = Pattern.Pattern
exception UNIF = Pattern.Unif

type type_unifier = Proof.context -> typ * typ -> Envir.env -> Envir.env
type type_matcher = type_unifier

type closed_unifier = Proof.context -> term * term -> Envir.env -> (Envir.env * thm) Seq.seq
type unifier = term Binders.binders -> closed_unifier
type e_unifier = unifier -> unifier

type closed_matcher = closed_unifier
type matcher = unifier
type e_matcher = matcher -> matcher

type normalisers = {
  norm_unif_thm : Envir_Normalisation.thm_normaliser,
  norm_thm : Envir_Normalisation.thm_normaliser,
  norm_term : Envir_Normalisation.term_normaliser
}

end
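The following minimal sketch is not part of the patch above; it only illustrates how the
combinators declared in UNIFICATION_COMBINATOR compose. The bindings my_e_unify and
my_fallback_unify are hypothetical placeholders (here instantiated with the failing unifiers
exported by the signature); in practice one would plug in concrete unifiers.

(*illustrative sketch only, not part of the patch: composing unifiers with the combinators above*)
local
  (*placeholder e-unifier and placeholder fallback unifier*)
  val my_e_unify = Unification_Combinator.fail_e_unify
  val my_fallback_unify = Unification_Combinator.fail_unify
in
(*run my_e_unify, passing my_fallback_unify for the subproblems it delegates*)
val combined_unify : Unification_Base.unifier =
  Unification_Combinator.add_fallback_unifier my_e_unify my_fallback_unify
(*try the combined unifier first and the plain fallback second*)
val sequential_unify : Unification_Base.unifier =
  Unification_Combinator.concat_unifiers [combined_unify, my_fallback_unify]
end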