diff --git a/Admin/components/components.sha1 b/Admin/components/components.sha1 --- a/Admin/components/components.sha1 +++ b/Admin/components/components.sha1 @@ -1,476 +1,478 @@ 59a71e08c34ff01f3f5c4af00db5e16369527eb7 Haskabelle-2013.tar.gz 23a96ff4951d72f4024b6e8843262eda988bc151 Haskabelle-2014.tar.gz eccff31931fb128c1dd522cfc85495c9b66e67af Haskabelle-2015.tar.gz ed740867925dcf58692c8d3e350c28e3b4d4a60f Isabelle_app-20210126.tar.gz 8ee375cfc38972f080dbc78f07b68dac03efe968 ProofGeneral-3.7.1.1.tar.gz 847b52c0676b5eb0fbf0476f64fc08c2d72afd0c ProofGeneral-4.1.tar.gz 8e0b2b432755ef11d964e20637d1bc567d1c0477 ProofGeneral-4.2-1.tar.gz 51e1e0f399e934020565b2301358452c0bcc8a5e ProofGeneral-4.2-2.tar.gz 8472221c876a430cde325841ce52893328302712 ProofGeneral-4.2.tar.gz fbe83b522cb37748ac1b3c943ad71704fdde2f82 bash_process-1.1.1.tar.gz bb9ef498cd594b4289221b96146d529c899da209 bash_process-1.1.tar.gz 81250148f8b89ac3587908fb20645081d7f53207 bash_process-1.2.1.tar.gz 97b2491382130a841b3bbaebdcf8720c4d4fb227 bash_process-1.2.2.tar.gz 5c5b7c18cc1dc2a4d22b997dac196da09eaca868 bash_process-1.2.3-1.tar.gz 48b01bd9436e243ffcb7297f08b498d0c0875ed9 bash_process-1.2.3.tar.gz 11815d5f3af0de9022e903ed8702c136591f06fe bash_process-1.2.4-1.tar.gz 729486311833e4eff0fbf2d8041dddad520ca88c bash_process-1.2.4-2.tar.gz 7ae9ec8aab2d8a811842d9dc67d8bf6c179e11ee bash_process-1.2.4.tar.gz 9e21f447bfa0431ae5097301d553dd6df3c58218 bash_process-1.2.tar.gz a65ce644b6094d41e9f991ef851cf05eff5dd0a9 bib2xhtml-20171221.tar.gz 4085dd6060a32d7e0d2e3f874c463a9964fd409b bib2xhtml-20190409.tar.gz f92cff635dfba5d4d77f469307369226c868542c cakeml-2.0.tar.gz e7ffe4238b61a3c1ee87aca4421e7a612e09b836 ci-extras-1.tar.gz e880f31f59bd403fb72fcd3b5afb413c3831a21c csdp-6.1-1.tar.gz 2659100ba8e28e7cb0ecb554178ee5315d4a87f5 csdp-6.1.1.tar.gz a2bd94f4f9281dc70dfda66cf28016c2ffef7ed7 csdp-6.1.tar.gz ec17080269737e4a97b4424a379924c09b338ca2 csdp-6.2.0.tar.gz 70105fd6fbfd1a868383fc510772b95234325d31 csdp-6.x.tar.gz 2f6417b8e96a0e4e8354fe0f1a253c18fb55d9a7 cvc3-2.4.1.tar.gz d70bfbe63590153c07709dea7084fbc39c669841 cvc4-1.5-1.tar.gz 541eac340464c5d34b70bb163ae277cc8829c40f cvc4-1.5-2.tar.gz 1a44895d2a440091a15cc92d7f77a06a2e432507 cvc4-1.5-3.tar.gz c0d8d5929b00e113752d8bf5d11241cd3bccafce cvc4-1.5-4.tar.gz ffb0d4739c10eb098eb092baef13eccf94a79bad cvc4-1.5-5.tar.gz 3682476dc5e915cf260764fa5b86f1ebdab57507 cvc4-1.5.tar.gz a5e02b5e990da4275dc5d4480c3b72fc73160c28 cvc4-1.5pre-1.tar.gz 4d9658fd2688ae8ac78da8fdfcbf85960f871b71 cvc4-1.5pre-2.tar.gz b01fdb93f2dc2b8bcfd41c6091d91b37d6e240f9 cvc4-1.5pre-3.tar.gz 76ff6103b8560f0e2778bbfbdb05f5fa18f850b7 cvc4-1.5pre-4.tar.gz 03aec2ec5757301c9df149f115d1f4f1d2cafd9e cvc4-1.5pre.tar.gz e99560d0b7cb9bafde2b0ec1a3a95af315918a25 cvc4-1.8.tar.gz 842d9526f37b928cf9e22f141884365129990d63 cygwin-20130110.tar.gz cb3b0706d208f104b800267697204f6d82f7b48a cygwin-20130114.tar.gz 3b44cca04855016d5f8cfb5101b2e0579ab80197 cygwin-20130117.tar.gz 1fde9ddf0fa4f398965113d0c0c4f0e97c78d008 cygwin-20130716.tar.gz a03735a53c2963eb0b453f6a7282d3419f28bf38 cygwin-20130916.tar.gz 7470125fc46e24ee188bdaacc6d560e01b6fa839 cygwin-20140520.tar.gz db4dedae026981c5f001be283180abc1962b79ad cygwin-20140521.tar.gz acbc4bf161ad21e96ecfe506266ccdbd288f8a6f cygwin-20140530.tar.gz 3dc680d9eb85276e8c3e9f6057dad0efe2d5aa41 cygwin-20140626.tar.gz 8e562dfe57a2f894f9461f4addedb88afa108152 cygwin-20140725.tar.gz 238d8e30e8e22495b7ea3f5ec36e852e97fe8bbf cygwin-20140813.tar.gz 629b8fbe35952d1551cd2a7ff08db697f6dff870 cygwin-20141024.tar.gz 
ce93d0b3b2743c4f4e5bba30c2889b3b7bc22f2c cygwin-20150410.tar.gz fa712dd5ec66ad16add1779d68aa171ff5694064 cygwin-20151210.tar.gz 056b843d5a3b69ecf8a52c06f2ce6e696dd275f9 cygwin-20151221.tar.gz 44f3a530f727e43a9413226c2423c9ca3e4c0cf5 cygwin-20161002.tar.gz dd56dd16d861fc6e1a008bf5e9da6f33ed6eb820 cygwin-20161022.tar.gz d9ad7aae99d54e3b9813151712eb88a441613f04 cygwin-20161024.tar.gz f8eb6a0f722e3cfe3775d1204c5c7063ee1f008e cygwin-20170828.tar.gz c22048912b010a5a0b4f2a3eb4d318d6953761e4 cygwin-20170930.tar.gz 5a3919e665947b820fd7f57787280c7512be3782 cygwin-20180604.tar.gz 2aa049170e8088de59bd70eed8220f552093932d cygwin-20190320.tar.gz fb898e263fcf6f847d97f564fe49ea0760bb453f cygwin-20190322.tar.gz cd01fac0ab4fdb50a2bbb6416da3f15a4d540da1 cygwin-20190524.tar.gz caa616fbab14c1fce790a87db5c4758c1322cf28 cygwin-20200116.tar.gz f053a9ab01f0be9cb456560f7eff66a8e7ba2fd2 cygwin-20200323.tar.gz 0107343cd2562618629f73b2581168f0045c3234 cygwin-20201002.tar.gz a3d481401b633c0ee6abf1da07d75da94076574c cygwin-20201130.tar.gz 5b1820b87b25d8f2d237515d9854e3ce54ee331b cygwin-20211002.tar.gz 5dff30be394d88dd83ea584fa6f8063bdcdc21fd cygwin-20211004.tar.gz fffaae24da4d274d34b8dc79a76b478b87ec31dd cygwin-20211007.tar.gz 0fe549949a025d65d52d6deca30554de8fca3b6e e-1.5.tar.gz 2e293256a134eb8e5b1a283361b15eb812fbfbf1 e-1.6-1.tar.gz e1919e72416cbd7ac8de5455caba8901acc7b44d e-1.6-2.tar.gz b98a98025d1f7e560ca6864a53296137dae736b4 e-1.6.tar.gz c11b25c919e2ec44fe2b6ac2086337b456344e97 e-1.8.tar.gz a895a96ec7e6fcc275114bb9b4c92b20fac73dba e-2.0-1.tar.gz 2ebd7e3067a2cdae3cb8b073345827013978d74b e-2.0-2.tar.gz fac44556dd16f666a2c186be30aa6d8c67228bb9 e-2.0-3.tar.gz 5d36fb62912cfcff7f3b99a6266c578aafc288b7 e-2.0-4.tar.gz 3223c51c0b16fe00ced4ae903041fff858e61742 e-2.0-5.tar.gz 6b962a6b4539b7ca4199977973c61a8c98a492e8 e-2.0.tar.gz 66449a7b68b7d85a7189e10735a81069356123b6 e-2.5-1.tar.gz 813b66ca151d7a39b5cacb39ab52acabc2a54845 e-2.5.tar.gz 6e63f9f354b8c06035952845b987080699a12d55 e-2.6-1.tar.gz a3bebab5df4294dac2dd7fd2065a94df00e0b3ff e-2.6.tar.gz 6d34b18ca0aa1e10bab6413045d079188c0e2dfb exec_process-1.0.1.tar.gz 8b9bffd10e396d965e815418295f2ee2849bea75 exec_process-1.0.2.tar.gz e6aada354da11e533af2dee3dcdd96c06479b053 exec_process-1.0.3.tar.gz ae7ee5becb26512f18c609e83b34612918bae5f0 exec_process-1.0.tar.gz 7a4b46752aa60c1ee6c53a2c128dedc8255a4568 flatlaf-0.46-1.tar.gz ed5cbc216389b655dac21a19e770a02a96867b85 flatlaf-0.46.tar.gz d37b38b9a27a6541c644e22eeebe9a339282173d flatlaf-1.0-rc1.tar.gz dac46ce81cee10fb36a9d39b414dec7b7b671545 flatlaf-1.0-rc2.tar.gz d94e6da7299004890c04a7b395a3f2d381a3281e flatlaf-1.0-rc3.tar.gz 7ca3e6a8c9bd837990e64d89e7fa07a7e7cf78ff flatlaf-1.0.tar.gz 9908e5ab721f1c0035c0ab04dc7ad0bd00a8db27 flatlaf-1.2.tar.gz 9534b721b7b78344f3225067ee4df28a5440b87e flatlaf-1.6.4.tar.gz 212a0f1f867511722024cc60156fd71872a16f92 flatlaf-1.6.tar.gz f339234ec18369679be0095264e0c0af7762f351 gnu-utils-20210414.tar.gz 71259aa46134e6cf2c6473b4fc408051b3336490 gnu-utils-20211030.tar.gz 683acd94761ef460cca1a628f650355370de5afb hol-light-bundle-0.5-126.tar.gz 989234b3799fe8750f3c24825d1f717c24fb0214 idea-icons-20210508.tar.gz 20b53cfc3ffc5b15c1eabc91846915b49b4c0367 isabelle_fonts-20151021.tar.gz 736844204b2ef83974cd9f0a215738b767958c41 isabelle_fonts-20151104.tar.gz 9502c1aea938021f154adadff254c5c55da344bd isabelle_fonts-20151106.tar.gz f5c63689a394b974ac0d365debda577c6fa31c07 isabelle_fonts-20151107.tar.gz 812101680b75f7fa9ee8e138ea6314fa4824ea2d isabelle_fonts-20151229.tar.gz 2730e1475c7d655655882e75743e0b451725a274 
isabelle_fonts-20151231.tar.gz 1f004a6bf20088a7e8f1b3d4153aa85de6fc1091 isabelle_fonts-20160101.tar.gz 379d51ef3b71452dac34ba905def3daa8b590f2e isabelle_fonts-20160102.tar.gz 878536aab1eaf1a52da560c20bb41ab942971fa3 isabelle_fonts-20160227.tar.gz 8ff0eedf0191d808ecc58c6b3149a4697f29ab21 isabelle_fonts-20160812-1.tar.gz 9283e3b0b4c7239f57b18e076ec8bb21021832cb isabelle_fonts-20160812.tar.gz 620cffeb125e198b91a716da116f754d6cc8174b isabelle_fonts-20160830.tar.gz b70690c85c05d0ca5bc29287abd20142f6ddcfb0 isabelle_fonts-20171222.tar.gz c17c482e411bbaf992498041a3e1dea80336aaa6 isabelle_fonts-20171230.tar.gz 3affbb306baff37c360319b21cbaa2cc96ebb282 isabelle_fonts-20180113.tar.gz bee32019e5d7cf096ef2ea1d836c732e9a7628cc isabelle_fonts-20181124.tar.gz f249bc2c85bd2af9eee509de17187a766b74ab86 isabelle_fonts-20181129.tar.gz 928b5320073d04d93bcc5bc4347b6d01632b9d45 isabelle_fonts-20190210.tar.gz dfcdf9a757b9dc36cee87f82533b43c58ba84abe isabelle_fonts-20190309.tar.gz 95e3acf038df7fdeeacd8b4769930e6f57bf3692 isabelle_fonts-20190406.tar.gz dabcf5085d67c99159007007ff0e9bf775e423d1 isabelle_fonts-20190409.tar.gz 76827987c70051719e117138858930d42041f57d isabelle_fonts-20190717.tar.gz abc8aea3ae471f9313917008ac90e5c1c99e17da isabelle_fonts-20210317.tar.gz 3ff9195aab574fc75ca3b77af0adb33f9b6d7b74 isabelle_fonts-20210318.tar.gz b166b4bd583b6442a5d75eab06f7adbb66919d6d isabelle_fonts-20210319.tar.gz 9467ad54a9ac10a6e7e8db5458d8d2a5516eba96 isabelle_fonts-20210321.tar.gz 1f7a0b9829ecac6552b21e995ad0f0ac168634f3 isabelle_fonts-20210322.tar.gz 667000ce6dd6ea3c2d11601a41c206060468807d isabelle_fonts-20211004.tar.gz 916adccd2f40c55116b68b92ce1eccb24d4dd9a2 isabelle_setup-20210630.tar.gz c611e363287fcc9bdd93c33bef85fa4e66cd3f37 isabelle_setup-20210701.tar.gz a0e7527448ef0f7ce164a38a50dc26e98de3cad6 isabelle_setup-20210709.tar.gz e413706694b0968245ee15183af2d464814ce0a4 isabelle_setup-20210711.tar.gz d2c9fd7b73457a460111edd6eb93a133272935fb isabelle_setup-20210715.tar.gz a5f478ba1088f67c2c86dc2fa7764b6d884e5ae5 isabelle_setup-20210716-1.tar.gz 79fad009cb22aa5e7cb4aed3c810ad5f61790293 isabelle_setup-20210716.tar.gz 692a39f716998e556ec9559c9ca362fc8fc9d5b6 isabelle_setup-20210717-1.tar.gz 7322d6d84d75c486a58ed36630431db4499e3232 isabelle_setup-20210717-2.tar.gz 14f8508bcae9140815bb23e430e26d2cbc504b81 isabelle_setup-20210717.tar.gz ca801d5c380ea896ee32b309ff19ae5f34538963 isabelle_setup-20210718.tar.gz ac9739e38e4fbbfce1a71a0987a57b22f83922d3 isabelle_setup-20210724-1.tar.gz 4554679cc8ea31e539655810a14d14216b383d0e isabelle_setup-20210724-2.tar.gz 127a75ae33e97480d352087fcb9b47a632d77169 isabelle_setup-20210724.tar.gz 309909ec6d43ae460338e9af54c1b2a48adcb1ec isabelle_setup-20210726.tar.gz a14ce46c62c64c3413f3cc9239242e33570d0f3d isabelle_setup-20210922.tar.gz b22066a9dcde6f813352dcf6404ac184440a22df isabelle_setup-20211109.tar.gz 0b2206f914336dec4923dd0479d8cee4b904f544 jdk-11+28.tar.gz e12574d838ed55ef2845acf1152329572ab0cc56 jdk-11.0.10+9.tar.gz 3e05213cad47dbef52804fe329395db9b4e57f39 jdk-11.0.2+9.tar.gz 06ac8993b5bebd02c70f1bd18ce13075f01115f3 jdk-11.0.3+7.tar.gz e7e3cc9b0550c1e5d71197ad8c30f92b622d7183 jdk-11.0.4+11.tar.gz 49007a84a2643a204ce4406770dfd574b97880d9 jdk-11.0.5+10.tar.gz 3c250e98eb82f98afc6744ddc9170d293f0677e1 jdk-11.0.6+10.tar.gz 76cf7a141e15db30bd975089c65c833b58092aa7 jdk-11.0.9+11.tar.gz 71d19df63816e9be1c4c5eb44aea7a44cfadb319 jdk-11.tar.gz 72455a2fdb6cced9cd563f4d5d6134f7a6c34913 jdk-15.0.1+9.tar.gz e8ae300e61b0b121018456d50010b555bc96ce10 jdk-15.0.2+7.tar.gz 
a426a32ad34014953c0f7d4cc6f44199572e1c38 jdk-17+35.tar.gz 85707cfe369d0d32accbe3d96a0730c87e8639b5 jdk-17.0.1+12.tar.gz 8d83e433c1419e0c0cc5fd1762903d11b4a5752c jdk-6u31.tar.gz 38d2d2a91c66714c18430e136e7e5191af3996e6 jdk-7u11.tar.gz d765bc4ad2f34d494429b2a8c1563c49db224944 jdk-7u13.tar.gz 13a265e4b706ece26fdfa6fc9f4a3dd1366016d2 jdk-7u21.tar.gz 5080274f8721a18111a7f614793afe6c88726739 jdk-7u25.tar.gz dd24d63afd6d17b29ec9cb2b2464d4ff2e02de2c jdk-7u40.tar.gz ec740ee9ffd43551ddf1e5b91641405116af6291 jdk-7u6.tar.gz 71b629b2ce83dbb69967c4785530afce1bec3809 jdk-7u60.tar.gz e119f4cbfa2a39a53b9578d165d0dc44b59527b7 jdk-7u65.tar.gz d6d1c42989433839fe64f34eb77298ef6627aed4 jdk-7u67.tar.gz b66039bc6dc2bdb2992133743005e1e4fc58ae24 jdk-7u72.tar.gz d980055694ddfae430ee001c7ee877d535e97252 jdk-7u76.tar.gz baa6de37bb6f7a104ce5fe6506bca3d2572d601a jdk-7u80.tar.gz 7d5b152ac70f720bb9e783fa45ecadcf95069584 jdk-7u9.tar.gz baf275a68d3f799a841932e4e9a95a1a604058ae jdk-8u102.tar.gz 5442f1015a0657259be0590b04572cd933431df7 jdk-8u11.tar.gz 741de6a4a805a0f9fb917d1845409e99346c2747 jdk-8u112.tar.gz ae7df8bd0c18eb40237cf54cc28933f4893b9c92 jdk-8u121.tar.gz 51531a3a0c16e180ed95cb7d2bd680c2ec0aa553 jdk-8u131.tar.gz e45edcf184f608d6f4a7b966d65a5d3289462693 jdk-8u144.tar.gz 264e806b9300a4fb3b6e15ba0e2c664d4ea698c8 jdk-8u152.tar.gz 84b04d877a2ea3a4e2082297b540e14f76722bc5 jdk-8u162.tar.gz 87303a0de3fd595aa3857c8f7cececa036d6ed18 jdk-8u172.tar.gz 9ae0338a5277d8749b4b4c7e65fc627319d98b27 jdk-8u181.tar.gz cfecb1383faaf027ffbabfcd77a0b6a6521e0969 jdk-8u20.tar.gz 44ffeeae219782d40ce6822b580e608e72fd4c76 jdk-8u31.tar.gz c95ebf7777beb3e7ef10c0cf3f734cb78f9828e4 jdk-8u5.tar.gz 4132cf52d5025bf330d53b96a5c6466fef432377 jdk-8u51.tar.gz 74df343671deba03be7caa49de217d78b693f817 jdk-8u60.tar.gz dfb087bd64c3e5da79430e0ba706b9abc559c090 jdk-8u66.tar.gz 2ac389babd15aa5ddd1a424c1509e1c459e6fbb1 jdk-8u72.tar.gz caa0cf65481b6207f66437576643f41dabae3c83 jdk-8u92.tar.gz 778fd85c827ec49d2d658a832d20e63916186b0d jedit-20210715.tar.gz beb99f2cb0bd4e595c5c597d3970c46aa21616e4 jedit-20210717.tar.gz 33dd96cd83f2c6a26c035b7a0ee57624655224c5 jedit-20210724.tar.gz 0e4fd4d66388ddc760fa5fbd8d4a9a3b77cf59c7 jedit-20210802.tar.gz 258d527819583d740a3aa52dfef630eed389f8c6 jedit-20211019.tar.gz f4f3fcbd54488297a5d2fcd23a2595912d5ba80b jedit-20211103.tar.gz 44775a22f42a9d665696bfb49e53c79371c394b0 jedit_build-20111217.tar.gz a242a688810f2bccf24587b0062ce8027bf77fa2 jedit_build-20120304.tar.gz 4c948dee53f74361c097c08f49a1a5ff9b17bd1d jedit_build-20120307.tar.gz 9c221fe71af8a063fcffcce21672a97aea0a8d5b jedit_build-20120313.tar.gz ed72630f307729df08fdedb095f0af8725f81b9c jedit_build-20120327.tar.gz 6425f622625024c1de27f3730d6811f6370a19cd jedit_build-20120414.tar.gz 7b012f725ec1cc102dc259df178d511cc7890bba jedit_build-20120813.tar.gz 8e1d36f5071e3def2cb281f7fefe9f52352cb88f jedit_build-20120903.tar.gz 8fa0c67f59beba369ab836562eed4e56382f672a jedit_build-20121201.tar.gz 06e9be2627ebb95c45a9bcfa025d2eeef086b408 jedit_build-20130104.tar.gz c85c0829b8170f25aa65ec6852f505ce2a50639b jedit_build-20130628.tar.gz 5de3e399be2507f684b49dfd13da45228214bbe4 jedit_build-20130905.tar.gz 87136818fd5528d97288f5b06bd30c787229eb0d jedit_build-20130910.tar.gz c63189cbe39eb8104235a0928f579d9523de78a9 jedit_build-20130925.tar.gz 65cc13054be20d3a60474d406797c32a976d7db7 jedit_build-20130926.tar.gz 30ca171f745adf12b65c798c660ac77f9c0f9b4b jedit_build-20131106.tar.gz 054c1300128f8abd0f46a3e92c756ccdb96ff2af jedit_build-20140405.tar.gz 4a963665537ea66c69de4d761846541ebdbf69f2 
jedit_build-20140511.tar.gz a9d637a30f6a87a3583f265da51e63e3619cff52 jedit_build-20140722.tar.gz f29391c53d85715f8454e1aaa304fbccc352928f jedit_build-20141018.tar.gz d7206d4c9d14d3f4c8115422b7391ffbcc6e80b4 jedit_build-20141026.tar.gz f15d36abc1780875a46b6dbd4568e43b776d5db6 jedit_build-20141104.tar.gz 14ce124c897abfa23713928dc034d6ef0e1c5031 jedit_build-20150228.tar.gz b5f7115384c167559211768eb5fe98138864473b jedit_build-20151023.tar.gz 8ba7b6791be788f316427cdcd805daeaa6935190 jedit_build-20151124.tar.gz c70c5a6c565d435a09a8639f8afd3de360708e1c jedit_build-20160330.tar.gz d4e1496c257659cf15458d718f4663cdd95a404e jedit_build-20161024.tar.gz d806c1c26b571b5b4ef05ea11e8b9cf936518e06 jedit_build-20170319.tar.gz 7bcb202e13358dd750e964b2f747664428b5d8b3 jedit_build-20180417.tar.gz 23c8a05687d05a6937f7d600ac3aa19e3ce59c9c jedit_build-20180504.tar.gz 9c64ee0705e5284b507ca527196081979d689519 jedit_build-20181025.tar.gz cfa65bf8720b9b798ffa0986bafbc8437f44f758 jedit_build-20181026.tar.gz 847492b75b38468268f9ea424d27d53f2d95cef4 jedit_build-20181203.tar.gz 536a38ed527115b4bf2545a2137ec57b6ffad718 jedit_build-20190120.tar.gz 58b9f03e5ec0b85f8123c31f5d8092dae5803773 jedit_build-20190130.tar.gz ec0aded5f2655e2de8bc4427106729e797584f2f jedit_build-20190224.tar.gz 1e53598a02ec8d8736b15f480cbe2c84767a7827 jedit_build-20190508.tar.gz b9c6f49d3f6ebe2e85a50595ce7412d01a4314ac jedit_build-20190717.tar.gz 1c753beb93e92e95e99e8ead23a68346bd1af44a jedit_build-20200610.tar.gz 533b1ee6459f59bcbe4f09e214ad2cb990fb6952 jedit_build-20200908.tar.gz f9966b5ed26740bb5b8bddbfe947fcefaea43d4d jedit_build-20201223.tar.gz 0bdbd36eda5992396e9c6b66aa24259d4dd7559c jedit_build-20210201.tar.gz a0744f1948abdde4bfb51dd4769b619e7444baf1 jedit_build-20210510-1.tar.gz 837d6c8f72ecb21ad59a2544c69aadc9f05684c6 jedit_build-20210510.tar.gz 7bdae3d24b10261f6cb277446cf9ecab6062bd6f jedit_build-20210708.tar.gz 0bd2bc2d9a491ba5fc8dd99df27c04f11a72e8fa jfreechart-1.0.14-1.tar.gz 8122526f1fc362ddae1a328bdbc2152853186fee jfreechart-1.0.14.tar.gz d911f63a5c9b4c7335bb73f805cb1711ce017a84 jfreechart-1.5.0.tar.gz d84b7d8ef273afec55284327fca7dd20f5ecb77a jfreechart-1.5.1.tar.gz 6fa0c221ef55919b684449f0111a8112358e94ff jfreechart-1.5.3.tar.gz c8a19a36adf6cefa779d85f22ded2f4654e68ea5 jortho-1.0-1.tar.gz 2155e0bdbd29cd3d2905454de2e7203b9661d239 jortho-1.0-2.tar.gz ffe179867cf5ffaabbb6bb096db9bdc0d7110065 jortho-1.0.tar.gz 6c737137cc597fc920943783382e928ea79e3feb kodkodi-1.2.16.tar.gz afb04f4048a87bb888fe7b05b0139cb060c7925b kodkodi-1.5.2-1.tar.gz 5f95c96bb99927f3a026050f85bd056f37a9189e kodkodi-1.5.2.tar.gz 0634a946b216f7f07f1a0f7e28cf345daa28828f kodkodi-1.5.3.tar.gz 52e95b3493d71902f9df89d0bb59d0046a5f0c63 kodkodi-1.5.4-1.tar.gz 267189c637de26cf304d699cfa95389da002b250 kodkodi-1.5.4.tar.gz 3ecdade953bb455ed2907952be287d7e5cf6533b kodkodi-1.5.5.tar.gz 8aa939f5127290eb9a99952d375be9ffbf90c43b kodkodi-1.5.6-1.tar.gz 6b12bf3f40b16fae8ff22aa39171fa018d107cb3 kodkodi-1.5.6.tar.gz c8b2e632f3ab959a4e037833a45e6360c8b72a99 kodkodi-1.5.7.tar.gz 377e36efb8608e6c828c7718d890e97fde2006a4 linux_app-20131007.tar.gz 759848095e2ad506083d92b5646947e3c32f27a0 linux_app-20191223.tar.gz 1a449ce69ac874e21804595d16aaaf5a0d0d0c10 linux_app-20200110.tar.gz 0aab4f73ff7f5e36f33276547e10897e1e56fb1d macos_app-20130716.tar.gz ad5d0e640ce3609a885cecab645389a2204e03bb macos_app-20150916.tar.gz 400af57ec5cd51f96928d9de00d077524a6fe316 macos_app-20181205.tar.gz 3bc42b8e22f0be5ec5614f1914066164c83498f8 macos_app-20181208.tar.gz 5fb1a2d21b220d0e588790c0203ac87c10ed0870 
minisat-2.2.1-1.tar.gz ae76bfaade3bf72ff6b2d3aafcd52fa45609fcd1 minisat-2.2.1.tar.gz eda10c62da927a842c0a8881f726eac85e1cb4f7 naproche-20210122.tar.gz edcb517b7578db4eec1b6573b624f291776e11f6 naproche-20210124.tar.gz d858eb0ede6aea6b8cc40de63bd3a17f8f9f5300 naproche-20210129.tar.gz 810ee0f35adada9bf970c33fd80b986ab2255bf3 naproche-20210201.tar.gz +37bb6d934cfaf157efcadb349a0244d145ce15b0 naproche-20211211.tar.gz +d098dd0873b1720a77dc4e060267f9a6c93f341a naproche-2d99afe5c349.tar.gz 4a4e56fd03b7ba4edd38046f853873a90cf55d1a naproche-4ad61140062f.tar.gz 77252e0b40f89825b9b5935f9f0c4cd5d4e7012a naproche-6d0d76ce2f2a.tar.gz 9c02ecf93863c3289002c5e5ac45a83e2505984c naproche-755224402e36.tar.gz e1b34e8f54e7e5844873612635444fed434718a1 naproche-7d0947a91dd5.tar.gz 26df569cee9c2fd91b9ac06714afd43f3b37a1dd nunchaku-0.3.tar.gz e573f2cbb57eb7b813ed5908753cfe2cb41033ca nunchaku-0.5.tar.gz 3d7b7690dfd09e25ad56e64b519f61f06e3ab706 old_vampire-4.2.2.tar.gz fe57793aca175336deea4f5e9c0d949a197850ac opam-1.2.2.tar.gz eb499a18e7040ca0fe1ca824c9dcb2087c47c9ba opam-2.0.3-1.tar.gz 002f74c9e65e650de2638bf54d7b012b8de76c28 opam-2.0.3.tar.gz ddb3b438430d9565adbf5e3d913bd52af8337511 opam-2.0.6.tar.gz fc66802c169f44511d3be30435eb89a11e635742 opam-2.0.7.tar.gz 108e947d17e9aa6170872614492d8f647802f483 opam-2.1.0.tar.gz 1c8cb6a8f4cbeaedce2d6d1ba8fc7e2ab3663aeb polyml-5.4.1.tar.gz a3f9c159a0ee9a63b7a5d0c835ed9c2c908f8b56 polyml-5.5.0-1.tar.gz 7d604a99355efbfc1459d80db3279ffa7ade3e39 polyml-5.5.0-2.tar.gz b3d776e6744f0cd2773d467bc2cfe1de3d1ca2fd polyml-5.5.0-3.tar.gz 1812e9fa6d163f63edb93e37d1217640a166cf3e polyml-5.5.0.tar.gz 36f5b8224f484721749682a3655c796a55a2718d polyml-5.5.1-1.tar.gz 36f78f27291a9ceb13bf1120b62a45625afd44a6 polyml-5.5.1.tar.gz a588640dbf5da9ae15455b02ef709764a48637dc polyml-5.5.2-1.tar.gz 4b690390946f7bfb777b89eb16d6f08987cca12f polyml-5.5.2-2.tar.gz 5b31ad8556e41dfd6d5e85f407818be399aa3d2a polyml-5.5.2-3.tar.gz 532f6e8814752aeb406c62fabcfd2cc05f8a7ca8 polyml-5.5.2.tar.gz 1c53f699d35c0db6c7cf4ea51f2310adbd1d0dc5 polyml-5.5.3-20150820.tar.gz b4b624fb5f34d1dc814fb4fb469fafd7d7ea018a polyml-5.5.3-20150908.tar.gz b668e1f43a41608a8eb365c5e19db6c54c72748a polyml-5.5.3-20150911.tar.gz 1f5cd9b1390dab13861f90dfc06d4180cc107587 polyml-5.5.3-20150916.tar.gz f78896e588e8ebb4da57bf0c95210b0f0fa9e551 polyml-5.6-1.tar.gz 21fa0592b7dfd23269063f42604438165630c0f0 polyml-5.6-2.tar.gz 03ba81e595fa6d6df069532d67ad3195c37d9046 polyml-5.6-20151123.tar.gz 822f489c18e38ce5ef979ec21dccce4473e09be6 polyml-5.6-20151206.tar.gz bd6a448f0e0d5787747f4f30ca661f9c1868e4a7 polyml-5.6-20151223.tar.gz 5b70c12c95a90d858f90c1945011289944ea8e17 polyml-5.6-20160118.tar.gz 5b19dc93082803b82aa553a5cfb3e914606c0ffd polyml-5.6.tar.gz 80b923fca3533bf291ff9da991f2262a98b68cc4 polyml-5.7-20170217.tar.gz 381a70cecf0fdee47f6842e2bdb5107ed52adab6 polyml-5.7.1-1.tar.gz 39dac33b569ac66f76126b8f4edc6d9227bd8a63 polyml-5.7.1-2.tar.gz 0b896ccc35bd3f2541cd55e6f0ed14637ed9fc68 polyml-5.7.1-4.tar.gz 262450ac9966abebae2e1d4f9ae703cfe0f5d8d9 polyml-5.7.1-5.tar.gz 1aeb57877d694db7fe4d4395287cddf3bc77710b polyml-5.7.1-6.tar.gz e3e7e20b1e0e5d5d68df4cd4caa1e1a7410d46b6 polyml-5.7.1-7.tar.gz 1430533c09b17f8be73798a47a5f409d43a04cf4 polyml-5.7.1-8.tar.gz 171b5783b88522a35e4822b19ef8ba838c04f494 polyml-5.7.1.tar.gz 5fbcab1da2b5eb97f24da2590ece189d55b3a105 polyml-5.7.tar.gz 51e024225b460900da5279f0b91b217085f98cf9 polyml-5.8-20190220.tar.gz 20a83fa58d497b533150defe39bcd4540529b25f polyml-5.8-20190306.tar.gz 9f0e9cd10df4c3383b063eb076e8b698ca50c3d0 
polyml-5.8.1-20191101.tar.gz f46deb909d645ac8c140968e4d32b5763beb9add polyml-5.8.1-20191113.tar.gz 36a40a981b57daae0463d14940a8edf6fa1af179 polyml-5.8.1-20191114.tar.gz 525b05536b08c11a1eae943fe6818a8622326084 polyml-5.8.1-20191124.tar.gz 9043828803483ca14df64488dff014ad050a6d34 polyml-5.8.1-20200228.tar.gz 1186607e2c43b77db86731f12fbedb531ca50a21 polyml-5.8.1-20200708.tar.gz 22ae16bf7850e73b903d2ca8eb506da05b441cf3 polyml-5.8.1.tar.gz cb8e85387315f62dcfc6b21ec378186e58068f76 polyml-5.8.2.tar.gz d1fd6eced69dc1df7226432fcb824568e0994ff2 polyml-5.8.tar.gz fb40145228f84513a9b083b54678a7d61b9c34c4 polyml-5.9-5d4caa8f7148.tar.gz 0f1c903b043acf7b221821d8b6374b3f943a122b polyml-5.9-610a153b941d.tar.gz 5f00a47b8f5180b33e68fcc6c343b061957a0a98 polyml-5.9-960de0cd0795.tar.gz 7056b285af67902b32f5049349a064f073f05860 polyml-5.9-cc80e2b43c38.tar.gz 0c396bd6b46ff11a2432b91aab2be0248bd9b0a4 polyml-5.9.tar.gz 49f1adfacdd6d29fa9f72035d94a31eaac411a97 polyml-test-0a6ebca445fc.tar.gz 2a8c4421e0a03c0d6ad556b3c36c34eb11568adb polyml-test-1236652ebd55.tar.gz 8e83fb5088cf265902b8da753a8eac5fe3f6a14b polyml-test-159dc81efc3b.tar.gz a0064c157a59e2706e18512a49a6dca914fa17fc polyml-test-1b2dcf8f5202.tar.gz 4e6543dbbb2b2aa402fd61428e1c045c48f18b47 polyml-test-79534495ee94.tar.gz 853ab0e9ff2b73790cc80a2d36cbff8b03e50a8e polyml-test-7a7b742897e9.tar.gz 85bfda83d138e936fdafd68ed3627b1058e5c2c3 polyml-test-7e49fce62e3d.tar.gz c629cd499a724bbe37b962f727e4ff340c50299d polyml-test-8529546198aa.tar.gz 7df4857d73dbc9edda25a6ad329e47639e70fadf polyml-test-8fda4fd22441.tar.gz 2b7c02b67feb2f44dda6938a7244f4257e7c580c polyml-test-905dae2ebfda.tar.gz 3dfdc58e5d9b28f038a725e05c9c2f2ce0bb2632 polyml-test-a3cfdf648da-1.tar.gz e2f075b0cc709f4f7f6492b725362f9010b2c6d1 polyml-test-a3cfdf648da-2.tar.gz 33568f69ce813b7405386ddbefa14ad0342bb8f0 polyml-test-a3cfdf648da.tar.gz 4bedaac4f1fb9a9199aa63695735063c47059003 polyml-test-a444f281ccec.tar.gz f3031692edcc5d8028a42861e4e40779f0f9d3e1 polyml-test-b68438d33c69.tar.gz cb2318cff6ea9293cd16a4435a4fe28ad9dbe0b8 polyml-test-cf46747fee61.tar.gz 67ffed2f98864721bdb1e87f0ef250e4c69e6160 polyml-test-d68c6736402e.tar.gz b4ceeaac47f3baae41c2491a8368b03217946166 polyml-test-e7a662f8f9c4.tar.gz 609c7d09d3ed01156ff91261e801e2403ff93729 polyml-test-e8d82343b692.tar.gz b6d87466e9b44e8ef4a2fac74c96b139080a506a polyml-test-f54aa41240d0.tar.gz d365f3fc11c2427cafc62b3c79951880a1476ebb polyml-test-f86ae3dc1686.tar.gz a619177143fea42a464f49bb864665407c07a16c polyml-test-fb4f42af00fa.tar.gz 53123dc011b2d4b4e8fe307f3c9fa355718ad01a postgresql-42.1.1.tar.gz 3a5d31377ec07a5069957f5477a4848cfc89a594 postgresql-42.1.4.tar.gz 7d6ef4320d5163ceb052eb83c1cb3968f099a422 postgresql-42.2.18.tar.gz e7cd5c7955e9eb5ce8cd07feb97230b23d2eec40 postgresql-42.2.2.tar.gz 1aaa38429dc9aa7b1095394d9a7ba3465f8d6e04 postgresql-42.2.24.tar.gz 231b33c9c3c27d47e3ba01b399103d70509e0731 postgresql-42.2.5.tar.gz 6335fbc0658e447b5b9bc48c9ad36e33a05bb72b postgresql-42.2.9.tar.gz f132329ca1045858ef456cc08b197c9eeea6881b postgresql-9.4.1212.tar.gz 0885e1f1d8feaca78d2f204b6487e6eec6dfab4b scala-2.10.0.tar.gz f7dc7a4e1aea46408fd6e44b8cfacb33af61afbc scala-2.10.1.tar.gz 207e4916336335386589c918c5e3f3dcc14698f2 scala-2.10.2.tar.gz 21c8ee274ffa471ab54d4196ecd827bf3d43e591 scala-2.10.3.tar.gz d4688ddaf83037ca43b5bf271325fc53ae70e3aa scala-2.10.4.tar.gz 44d12297a78988ffd34363535e6a8e0d94c1d8b5 scala-2.11.0.tar.gz 14f20de82b25215a5e055631fb147356400625e6 scala-2.11.1.tar.gz 4fe9590d08e55760b86755d3fab750e90ac6c380 scala-2.11.2.tar.gz 
27a296495b2167148de06314ed9a942f2dbe23fe scala-2.11.4.tar.gz 4b24326541161ce65424293ca9da3e7c2c6ab452 scala-2.11.5.tar.gz e7cf20e3b27c894c6127c7a37042c1667f57385e scala-2.11.6.tar.gz 4810c1b00719115df235be1c5991aa6ea7186134 scala-2.11.7.tar.gz 3eca4b80710996fff87ed1340dcea2c5f6ebf4f7 scala-2.11.8.tar.gz 0004e53f885fb165b50c95686dec40d99ab0bdbd scala-2.12.0.tar.gz 059cbdc58d36e3ac1fffcccd9139ecd34f271882 scala-2.12.10.tar.gz 82056106aa6fd37c159ea76d16096c20a749cccd scala-2.12.11.tar.gz fe7ff585acffaad7f0dd4a1d079134d15c26ed0d scala-2.12.12.tar.gz 74a8c3dab3a25a87357996ab3e95d825dc820fd0 scala-2.12.2.tar.gz d66796a68ec3254b46b17b1f8ee5bcc56a93aacf scala-2.12.3.tar.gz 1636556167dff2c191baf502c23f12e09181ef78 scala-2.12.4.tar.gz 8171f494bba54fb0d01c887f889ab8fde7171c2a scala-2.12.5.tar.gz 54c1b06fa2c5f6c2ab3d391ef342c0532cd7f392 scala-2.12.6.tar.gz 02358f00acc138371324b6248fdb62eed791c6bd scala-2.12.7.tar.gz 201c05ae9cc382ee6c08af49430e426f6bbe0d5a scala-2.12.8.tar.gz a0622fe75c3482ba7dc3ce74d58583b648a1ff0d scala-2.13.4-1.tar.gz ec53cce3c5edda1145ec5d13924a5f9418995c15 scala-2.13.4.tar.gz f51981baf34c020ad103b262f81796c37abcaa4a scala-2.13.5.tar.gz 0a7cab09dec357dab7819273f2542ff1c3ea0968 scala-2.13.6.tar.gz 1f8532dba290c6b2ef364632f3f92e71da93baba scala-2.13.7.tar.gz b447017e81600cc5e30dd61b5d4962f6da01aa80 scala-2.8.1.final.tar.gz 5659440f6b86db29f0c9c0de7249b7e24a647126 scala-2.9.2.tar.gz abe7a3b50da529d557a478e9f631a22429418a67 smbc-0.4.1.tar.gz cbd491c0feba1d21019d05564e76dd04f592ccb4 spass-3.8ds-1.tar.gz edaa1268d82203067657aabcf0371ce7d4b579b9 spass-3.8ds-2.tar.gz 43b5afbcad575ab6817d2289756ca22fd2ef43a9 spass-3.8ds.tar.gz b016a785f1f78855c00d351ff598355c3b87450f sqlite-jdbc-3.18.0-1.tar.gz b85b5bc071a59ef2a8326ceb1617d5a9a5be41cf sqlite-jdbc-3.18.0.tar.gz e56117a67ab01fb24c7fc054ede3160cefdac5f8 sqlite-jdbc-3.20.0.tar.gz 27aeac6a91353d69f0438837798ac4ae6f9ff8c5 sqlite-jdbc-3.23.1.tar.gz 4d17611857fa3a93944c1f159c0fd2a161967aaf sqlite-jdbc-3.27.2.1.tar.gz 806be457eb79408fcc5a72aeca3f64b2d89a6b63 sqlite-jdbc-3.30.1.tar.gz cba2b194114216b226d75d49a70d1bd12b141ac8 sqlite-jdbc-3.32.3.2.tar.gz 29306acd6ce9f4c87032b2c271c6df035fe7d4d3 sqlite-jdbc-3.34.0.tar.gz 8a2ca4d02cfedbfe4dad4490f1ed3ddba33a009a sqlite-jdbc-3.36.0.3.tar.gz 8d20968603f45a2c640081df1ace6a8b0527452a sqlite-jdbc-3.8.11.2.tar.gz 2369f06e8d095f9ba26df938b1a96000e535afff ssh-java-20161009.tar.gz a2335d28b5b95d8d26500a53f1a9303fc5beaf36 ssh-java-20190323.tar.gz fdc415284e031ee3eb2f65828cbc6945736fe995 stack-1.9.1.tar.gz 6e19948ff4a821e2052fc9b3ddd9ae343f4fcdbb stack-1.9.3.tar.gz f969443705aa8619e93af5b34ea98d15cd7efaf1 stack-2.1.3.tar.gz ebd0221d038966aa8bde075f1b0189ff867b02ca stack-2.5.1.tar.gz fa2d882ec45cbc8c7d2f3838b705a8316696dc66 stack-2.7.3.tar.gz 1f4a2053cc1f34fa36c4d9d2ac906ad4ebc863fd sumatra_pdf-2.1.1.tar.gz 601e08d048d8e50b0729429c8928b667d9b6bde9 sumatra_pdf-2.3.2.tar.gz 14d46c2eb1a34821703da59d543433f581e91df3 sumatra_pdf-2.4.tar.gz 44d67b6742919ce59a42368fc60e2afa210a3e42 sumatra_pdf-2.5.2.tar.gz 89719a13bc92810730a430973684629426ed1b2a sumatra_pdf-3.0.tar.gz f5afcc82f8e734665d38867e99475d3ad0d5ed15 sumatra_pdf-3.1.1.tar.gz a45eca5c1277f42f87bb8dc12a3074ccf5488221 sumatra_pdf-3.1.2-1.tar.gz 3b3239b2e6f8062b90d819f3703e30a50f4fa1e7 sumatra_pdf-3.1.2-2.tar.gz 8486387f61557147ec06b1f637117c017c8f0528 sumatra_pdf-3.1.2.tar.gz e8648878f908e93d64a393231ab21fdac976a9c2 sumatra_pdf-3.3.3.tar.gz 869ea6d8ea35c8ba68d7fcb028f16b2b7064c5fd vampire-1.0.tar.gz 399f687b56575b93e730f68c91c989cb48aa34d8 vampire-4.2.2.tar.gz 
0402978ca952f08eea73e483b694928ac402a304 vampire-4.5.1-1.tar.gz 26d9d171e169c6420a08aa99eda03ef5abb9c545 vampire-4.5.1.tar.gz 4571c042efd6fc3097e105a528826959acd888a3 vampire-4.6.tar.gz 98c5c79fef7256db9f64c8feea2edef0a789ce46 verit-2016post.tar.gz 52ba18a6c96b53c5ae9b179d5a805a0c08f1da6d verit-2020.10-rmx-1.tar.gz b6706e74e20e14038e9b38f0acdb5639a134246a verit-2020.10-rmx.tar.gz d33e1e36139e86b9e9a48d8b46a6f90d7863a51c verit-2021.06-rmx-1.tar.gz c11d1120fcefaec79f099fe2be05b03cd2aed8b9 verit-2021.06-rmx.tar.gz b576fd5d89767c1067541d4839fb749c6a68d22c verit-2021.06.1-rmx.tar.gz 19c6e5677b0a26cbc5805da79d00d06a66b7a671 verit-2021.06.2-rmx.tar.gz 81d21dfd0ea5c58f375301f5166be9dbf8921a7a windows_app-20130716.tar.gz fe15e1079cf5ad86f3cbab4553722a0d20002d11 windows_app-20130905.tar.gz e6a43b7b3b21295853bd2a63b27ea20bd6102f5f windows_app-20130906.tar.gz 8fe004aead867d4c82425afac481142bd3f01fb0 windows_app-20130908.tar.gz d273abdc7387462f77a127fa43095eed78332b5c windows_app-20130909.tar.gz c368908584e2bca38b3bcb20431d0c69399fc2f0 windows_app-20131130.tar.gz c3f5285481a95fde3c1961595b4dd0311ee7ac1f windows_app-20131201.tar.gz 14807afcf69e50d49663d5b48f4b103f30ae842b windows_app-20150821.tar.gz ed106181510e825bf959025d8e0a2fc3f78e7a3f windows_app-20180417.tar.gz e809e4ab0d33cb413a7c47dd947e7dbdfcca1c24 windows_app-20181002.tar.gz 9e96ba128a0617a9020a178781df49d48c997e19 windows_app-20181006.tar.gz 1c36a840320dfa9bac8af25fc289a4df5ea3eccb xz-java-1.2-1.tar.gz 2ae13aa17d0dc95ce254a52f1dba10929763a10d xz-java-1.2.tar.gz c22196148fcace5443a933238216cff5112948df xz-java-1.5.tar.gz 4368ee09154dff42666a8c87e072261745619e51 xz-java-1.6.tar.gz 63f5fa09e92a895cb9aea27d7142abc86c487d25 xz-java-1.8.tar.gz a06875bdadd653627a68d2083c5178c1264d8fc6 xz-java-1.9.tar.gz 4530a1aa6f4498ee3d78d6000fa71a3f63bd077f yices-1.0.28.tar.gz 3a8f77822278fe9250890e357248bc678d8fac95 z3-3.2-1.tar.gz 12ae71acde43bd7bed1e005c43034b208c0cba4c z3-3.2.tar.gz d94a716502c8503d63952bcb4d4176fac8b28704 z3-4.0.tar.gz 86e721296c400ada440e4a9ce11b9e845eec9e25 z3-4.3.0.tar.gz a8917c31b31c182edeec0aaa48870844960c8a61 z3-4.3.2pre-1.tar.gz 06b30757ff23aefbc30479785c212685ffd39f4d z3-4.3.2pre.tar.gz ed37c451b9b748901295898bf713b24d22cc8c17 z3-4.4.0_4.4.1.tar.gz 93e7e4bddc6afcf87fe2b6656cfcb1b1acd0a4f8 z3-4.4.0pre-1.tar.gz b1bc411c2083fc01577070b56b94514676f53854 z3-4.4.0pre-2.tar.gz 4c366ab255d2e9343fb635d44d4d55ddd24c76d0 z3-4.4.0pre-3.tar.gz 517ba7b94c1985416c5b411c8ae84456367eb231 z3-4.4.0pre.tar.gz 6e5d7a65757cac970eb5ad28cd62130c99f42c23 z3-4.4.1.tar.gz aa20745f0b03e606b1a4149598e0c7572b63c657 z3-4.8.3.tar.gz 9dfeb39c87393af7b6a34118507637aa53aca05e zipperposition-2.0-1.tar.gz b884c60653002a7811e3b652ae0515e825d98667 zipperposition-2.0.tar.gz b129ec4f8a4474953ec107536298ee08a01fbebc zipperposition-2.1-1.tar.gz 5f53a77efb5cbe9d0c95d74a1588cc923bd711a7 zipperposition-2.1.tar.gz diff --git a/CONTRIBUTORS b/CONTRIBUTORS --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -1,1074 +1,1078 @@ For the purposes of the license agreement in the file COPYRIGHT, a 'contributor' is anybody who is listed in this file (CONTRIBUTORS) or who is listed as an author in one of the source files of this Isabelle distribution. +Contributions to this Isabelle version +-------------------------------------- + + Contributions to Isabelle2021-1 ------------------------------- * September / October 2021: Jasmin Blanchette, Martin Desharnais, Mathias Fleury, Makarius Wenzel Upgrade of automatic theorem provers in Sledgehammer and the "smt" proof method. 
* July - September 2021: Makarius Wenzel Significantly improved Isabelle/Haskell library. * July - September 2021: Jasmin Blanchette, Martin Desharnais Various improvements to Sledgehammer. * September 2021: Dominique Unruh New theory of infinite sums (theory HOL-Analysis.Infinite_Sum), ordering of complex numbers (theory HOL-Library.Complex_Order), and products of uniform spaces (theory HOL-Analysis.Product_Vector). * August 2021: Fabian Huch, TU München Improved HTML presentation: links to formal entities. * November 2020 / July 2021: Norbert Schirmer, Apple Various improvements and cleanup of session "HOL-Statespace". * July 2021: Florian Haftmann Further consolidation of bit operations and word types. * June 2021: Florian Haftmann More context situations susceptible to global_interpretation. * March 2021: Lukas Stevens New order prover. * March 2021: Florian Haftmann Dedicated session for combinatorics. * March 2021: Simon Foster and Leo Freitas More symbol definitions for Z Notation: Isabelle fonts and LaTeX macros. * February 2021: Manuel Eberl New material in sessions HOL-Analysis and HOL-Probability, most notably Hoeffding's inequality and the negative binomial distribution. * January 2021: Jakub Kądziołka Some lemmas for HOL-Computational_Algebra. * January 2021: Martin Rasyzk Fast set operations for red-black trees. Contributions to Isabelle2021 ----------------------------- * January 2021: Manuel Eberl Characteristic of a semiring. * January 2021: Manuel Eberl Algebraic integers in HOL-Computational_Algebra. * December 2020: Stepan Holub Contributed lemmas for theory HOL.List. * December 2020: Martin Desharnais Zipperposition 2.0 as external prover for Sledgehammer. * December 2020: Walter Guttmann Extension of session HOL-Hoare with total correctness proof system. * November / December 2020: Makarius Wenzel Improved HTML presentation and PDF document preparation, using mostly Isabelle/Scala instead of Isabelle/ML. * November 2020: Stepan Holub Removed preconditions from lemma comm_append_are_replicate. * November 2020: Florian Haftmann Bundle mixins for locale and class expressions. * November 2020: Jakub Kądziołka Stronger lemmas about orders of group elements (generate_pow_card). * October 2020: Jasmin Blanchette, Martin Desharnais, Mathias Fleury Support veriT as external prover in Sledgehammer. * October 2020: Mathias Fleury Updated proof reconstruction for the SMT solver veriT in the smt method. * October 2020: Jasmin Blanchette, Martin Desharnais Support E prover 2.5 as external prover in Sledgehammer. * September 2020: Florian Haftmann Substantial reworking and modularization of Word library, with generic type conversions. * August 2020: Makarius Wenzel Finally enable PIDE protocol for batch-builds, with various consequences of handling session build databases, Isabelle/Scala within Isabelle/ML etc. * August 2020: Makarius Wenzel Improved monitoring of runtime statistics: ML GC progress and Java. * July 2020: Martin Desharnais Update to Metis 2.4. * June 2020: Makarius Wenzel Batch-builds via "isabelle build" allow invoking Scala from ML. * June 2020: Florian Haftmann Simproc defined_all for more aggressive substitution with variables from assumptions. * May 2020: Makarius Wenzel Antiquotations for Isabelle systems programming, notably @{scala_function} and @{scala} to invoke Scala from ML. * May 2020: Florian Haftmann Generic algebraically founded bit operations NOT, AND, OR, XOR. Contributions to Isabelle2020 ----------------------------- * February 2020: E.
Gunther, M. Pagano and P. Sánchez Terraf Simplified, generalised version of ZF/Constructible. * January 2020: LC Paulson The full finite Ramsey's theorem and elements of finite and infinite Ramsey theory. * December 2019: Basil Fürer, Andreas Lochbihler, Joshua Schneider, Dmitriy Traytel Extension of lift_bnf to support quotient types. * November 2019: Peter Zeller, TU Kaiserslautern Update of Isabelle/VSCode to WebviewPanel API. * October..December 2019: Makarius Wenzel Isabelle/Phabricator server setup, including Linux platform support in Isabelle/Scala. Client-side tool "isabelle hg_setup". * October 2019: Maximilian Schäffeler Port of the HOL Light decision procedure for metric spaces. * October 2019: Makarius Wenzel More scalable Isabelle dump and underlying headless PIDE session. * August 2019: Makarius Wenzel Better support for proof terms in Isabelle/Pure, notably via method combinator SUBPROOFS (ML) and "subproofs" (Isar). * July 2019: Alexander Krauss, Makarius Wenzel Minimal support for a soft-type system within the Isabelle logical framework. Contributions to Isabelle2019 ----------------------------- * April 2019: LC Paulson Homology and supporting lemmas on topology and group theory. * April 2019: Paulo de Vilhena and Martin Baillon Group theory developments, esp. algebraic closure of a field. * February/March 2019: Makarius Wenzel Stateless management of export artifacts in the Isabelle/HOL code generator. * February 2019: Manuel Eberl Exponentiation by squaring, used to implement "power" in monoid_mult and fast modular exponentiation. * February 2019: Manuel Eberl Carmichael's function, primitive roots in residue rings, more properties of the order in residue rings. * February 2019: Jeremy Sylvestre Formal Laurent Series and overhaul of Formal power series. * January 2019: Florian Haftmann Clarified syntax and congruence rules for big operators on sets involving the image operator. * January 2019: Florian Haftmann Renovation of code generation, particularly export into session data and proper strings and proper integers based on zarith for OCaml. * January 2019: Andreas Lochbihler New implementation for case_of_simps based on Code_Lazy's pattern matching elimination algorithm. * November/December 2018: Makarius Wenzel Support for Isabelle/Haskell applications of Isabelle/PIDE. * August/September 2018: Makarius Wenzel Improvements of headless Isabelle/PIDE session and server, and systematic exports from theory documents. * December 2018: Florian Haftmann Generic executable sorting algorithms based on executable comparators. * October 2018: Mathias Fleury Proof reconstruction for the SMT solver veriT in the smt method. Contributions to Isabelle2018 ----------------------------- * July 2018: Manuel Eberl "real_asymp" proof method for automatic proofs of real limits, "Big-O" statements, etc. * June 2018: Fabian Immler More tool support for HOL-Types_To_Sets. * June 2018: Martin Baillon and Paulo Emílio de Vilhena A variety of contributions to HOL-Algebra. * June 2018: Wenda Li New/strengthened results involving analysis, topology, etc. * May/June 2018: Makarius Wenzel System infrastructure to export blobs as theory presentation, and to dump PIDE database content in batch mode. * May 2018: Manuel Eberl Landau symbols and asymptotic equivalence (moved from the AFP). * May 2018: Jose Divasón (Universidad de la Rioja), Jesús Aransay (Universidad de la Rioja), Johannes Hölzl (VU Amsterdam), Fabian Immler (TUM) Generalizations in the formalization of linear algebra.
* May 2018: Florian Haftmann Consolidation of string-like types in HOL. * May 2018: Andreas Lochbihler (Digital Asset), Pascal Stoop (ETH Zurich) Code generation with lazy evaluation semantics. * March 2018: Florian Haftmann Abstract bit operations push_bit, take_bit, drop_bit, along with an algebraic foundation for bit strings and word types in HOL-ex. * March 2018: Viorel Preoteasa Generalisation of complete_distrib_lattice. * February 2018: Wenda Li A unified definition for the order of zeros and poles. Improved reasoning around non-essential singularities. * January 2018: Sebastien Gouezel Various small additions to HOL-Analysis. * December 2017: Jan Gilcher, Andreas Lochbihler, Dmitriy Traytel A new conditional parametricity prover. * October 2017: Alexander Maletzky Derivation of axiom "iff" in theory HOL.HOL from the other axioms. Contributions to Isabelle2017 ----------------------------- * September 2017: Lawrence Paulson HOL-Analysis, e.g. simplicial complexes, Jordan Curve Theorem. * September 2017: Jasmin Blanchette Further integration of Nunchaku model finder. * November 2016 - June 2017: Makarius Wenzel New Isabelle/VSCode, with underlying restructuring of Isabelle/PIDE. * 2017: Makarius Wenzel Session-qualified theory names (theory imports and ROOT files). Prover IDE improvements. Support for SQL databases in Isabelle/Scala: SQLite and PostgreSQL. * August 2017: Andreas Lochbihler, ETH Zurich Type of unordered pairs (HOL-Library.Uprod). * August 2017: Manuel Eberl, TUM HOL-Analysis: infinite products over natural numbers, infinite sums over arbitrary sets, connection between formal power series and analytic complex functions. * March 2017: Alasdair Armstrong, University of Sheffield and Simon Foster, University of York Fixed-point theory and Galois Connections in HOL-Algebra. * February 2017: Florian Haftmann, TUM Statically embedded computations implemented by generated code. Contributions to Isabelle2016-1 ------------------------------- * December 2016: Ondřej Kunčar, TUM Types_To_Sets: experimental extension of Higher-Order Logic to allow translation of types to sets. * October 2016: Jasmin Blanchette Integration of Nunchaku model finder. * October 2016: Jaime Mendizabal Roche, TUM Ported remaining theories of session Old_Number_Theory to the new Number_Theory and removed Old_Number_Theory. * September 2016: Sascha Boehme Proof method "argo" based on SMT technology for a combination of quantifier-free propositional logic, equality and linear real arithmetic. * July 2016: Daniel Stuewe Height-size proofs in HOL-Data_Structures. * July 2016: Manuel Eberl, TUM Algebraic foundation for primes; generalization from nat to general factorial rings. * June 2016: Andreas Lochbihler, ETH Zurich Formalisation of discrete subprobability distributions. * June 2016: Florian Haftmann, TUM Improvements to code generation: optional timing measurements, more succinct closures for static evaluation, fewer ambiguities concerning Scala implicits. * May 2016: Manuel Eberl, TUM Code generation for Probability Mass Functions. * March 2016: Florian Haftmann, TUM Abstract factorial rings with unique factorization. * March 2016: Florian Haftmann, TUM Reworking of the HOL char type as special case of a finite numeral type. * March 2016: Andreas Lochbihler, ETH Zurich Reasoning support for monotonicity, continuity and admissibility in chain-complete partial orders. * February - October 2016: Makarius Wenzel Prover IDE improvements. ML IDE improvements: bootstrap of Pure. Isar language consolidation.
Notational modernization of HOL. Tight Poly/ML integration. More Isabelle/Scala system programming modules (e.g. SSH, Mercurial). * Winter 2016: Jasmin Blanchette, Inria & LORIA & MPII, Aymeric Bouzy, Ecole polytechnique, Andreas Lochbihler, ETH Zurich, Andrei Popescu, Middlesex University, and Dmitriy Traytel, ETH Zurich 'corec' command and friends. * January 2016: Florian Haftmann, TUM Abolition of compound operators INFIMUM and SUPREMUM for complete lattices. Contributions to Isabelle2016 ----------------------------- * Winter 2016: Manuel Eberl, TUM Support for real exponentiation ("powr") in the "approximation" method. (This was removed in Isabelle 2015 due to a changed definition of "powr".) * Summer 2015 - Winter 2016: Lawrence C Paulson, Cambridge General, homology form of Cauchy's integral theorem and supporting material (ported from HOL Light). * Winter 2015/16: Gerwin Klein, NICTA New print_record command. * May - December 2015: Makarius Wenzel Prover IDE improvements. More Isar language elements. Document language refinements. Poly/ML debugger integration. Improved multi-platform and multi-architecture support. * Winter 2015: Manuel Eberl, TUM The radius of convergence of power series and various summability tests. Harmonic numbers and the Euler-Mascheroni constant. The Generalised Binomial Theorem. The complex and real Gamma/log-Gamma/Digamma/Polygamma functions and their most important properties. * Autumn 2015: Manuel Eberl, TUM Proper definition of division (with remainder) for formal power series; Euclidean Ring and GCD instance for formal power series. * Autumn 2015: Florian Haftmann, TUM Rewrite definitions for global interpretations and sublocale declarations. * Autumn 2015: Andreas Lochbihler Bourbaki-Witt fixpoint theorem for increasing functions on chain-complete partial orders. * Autumn 2015: Chaitanya Mangla, Lawrence C Paulson, and Manuel Eberl A large number of additional binomial identities. * Summer 2015: Daniel Matichuk, NICTA and Makarius Wenzel Isar subgoal command for proof structure within unstructured proof scripts. * Summer 2015: Florian Haftmann, TUM Generic partial division in rings as inverse operation of multiplication. * Summer 2015: Manuel Eberl and Florian Haftmann, TUM Type class hierarchy with common algebraic notions of integral (semi)domains like units, associated elements and normalization wrt. units. * Summer 2015: Florian Haftmann, TUM Fundamentals of abstract type class for factorial rings. * Summer 2015: Julian Biendarra, TUM and Dmitriy Traytel, ETH Zurich Command to lift a BNF structure on the raw type to the abstract type for typedefs. * Summer 2014: Jeremy Avigad, Luke Serafin, CMU, and Johannes Hölzl, TUM Proof of the central limit theorem: includes weak convergence, characteristic functions, and Levy's uniqueness and continuity theorem. Contributions to Isabelle2015 ----------------------------- * 2014/2015: Daniel Matichuk, Toby Murray, NICTA and Makarius Wenzel The Eisbach proof method language and "match" method. * Winter 2014 and Spring 2015: Ondrej Kuncar, TUM Extension of lift_definition to execute lifted functions that have as a return type a datatype containing a subtype. * March 2015: Jasmin Blanchette, Inria & LORIA & MPII, Mathias Fleury, MPII, and Dmitriy Traytel, TUM More multiset theorems, syntax, and operations. * December 2014: Johannes Hölzl, Manuel Eberl, Sudeep Kanav, TUM, and Jeremy Avigad, Luke Serafin, CMU Various integration theorems: mostly integration on intervals and substitution. 
* September 2014: Florian Haftmann, TUM Lexicographic order on functions and sum/product over function bodies. * August 2014: Andreas Lochbihler, ETH Zurich Test infrastructure for executing generated code in target languages. * August 2014: Manuel Eberl, TUM Generic euclidean algorithms for GCD et al. Contributions to Isabelle2014 ----------------------------- * July 2014: Thomas Sewell, NICTA Preserve equality hypotheses in "clarify" and friends. New "hypsubst_thin" method configuration option. * Summer 2014: Florian Haftmann, TUM Consolidation and generalization of facts concerning (abelian) semigroups and monoids, particularly products (resp. sums) on finite sets. * Summer 2014: Mathias Fleury, ENS Rennes, and Albert Steckermeier, TUM Work on exotic automatic theorem provers for Sledgehammer (LEO-II, veriT, Waldmeister, etc.). * June 2014: Florian Haftmann, TUM Internal reorganisation of the local theory / named target stack. * June 2014: Sudeep Kanav, TUM, Jeremy Avigad, CMU, and Johannes Hölzl, TUM Various properties of exponentially, Erlang, and normally distributed random variables. * May 2014: Cezary Kaliszyk, University of Innsbruck, and Jasmin Blanchette, TUM SML-based engines for MaSh. * March 2014: René Thiemann Improved code generation for multisets. * February 2014: Florian Haftmann, TUM Permanent interpretation inside theory, locale and class targets with mixin definitions. * Spring 2014: Lawrence C Paulson, Cambridge Theory Complex_Basic_Analysis. Tidying up Number_Theory vs Old_Number_Theory. * Winter 2013 and Spring 2014: Ondrej Kuncar, TUM Various improvements to Lifting/Transfer, integration with the BNF package. * Winter 2013 and Spring 2014: Makarius Wenzel, Université Paris-Sud / LRI Improvements of Isabelle/Scala and Isabelle/jEdit Prover IDE. * Fall 2013 and Winter 2014: Martin Desharnais, Lorenz Panny, Dmitriy Traytel, and Jasmin Blanchette, TUM Various improvements to the BNF-based (co)datatype package, including a more polished "primcorec" command, optimizations, and integration in the "HOL" session. * Winter/Spring 2014: Sascha Boehme, QAware GmbH, and Jasmin Blanchette, TUM "SMT2" module and "smt2" proof method, based on SMT-LIB 2 and Z3 4.3. * January 2014: Lars Hupel, TUM An improved, interactive simplifier trace with integration into the Isabelle/jEdit Prover IDE. * December 2013: Florian Haftmann, TUM Consolidation of abstract interpretations concerning min and max. * November 2013: Florian Haftmann, TUM Abolition of negative numeral literals in the logic. Contributions to Isabelle2013-1 ------------------------------- * September 2013: Lars Noschinski, TUM Conversion between function definitions as list of equations and case expressions in HOL. New library Simps_Case_Conv with commands case_of_simps, simps_of_case. * September 2013: Nik Sultana, University of Cambridge Improvements to HOL/TPTP parser and import facilities. * September 2013: Johannes Hölzl and Dmitriy Traytel, TUM New "coinduction" method (residing in HOL-BNF) to avoid boilerplate. * Summer 2013: Makarius Wenzel, Université Paris-Sud / LRI Improvements of Isabelle/Scala and Isabelle/jEdit Prover IDE. * Summer 2013: Manuel Eberl, TUM Generation of elimination rules in the function package. New command "fun_cases". * Summer 2013: Christian Sternagel, JAIST Improved support for ad hoc overloading of constants, including documentation and examples.
* Spring and Summer 2013: Lorenz Panny, Dmitriy Traytel, and Jasmin Blanchette, TUM Various improvements to the BNF-based (co)datatype package, including "primrec_new" and "primcorec" commands and a compatibility layer. * Spring and Summer 2013: Ondrej Kuncar, TUM Various improvements of Lifting and Transfer packages. * Spring 2013: Brian Huffman, Galois Inc. Improvements of the Transfer package. * Summer 2013: Daniel Kühlwein, ICIS, Radboud University Nijmegen Jasmin Blanchette, TUM Various improvements to MaSh, including a server mode. * First half of 2013: Steffen Smolka, TUM Further improvements to Sledgehammer's Isar proof generator. * May 2013: Florian Haftmann, TUM Ephemeral interpretation in local theories. * May 2013: Lukas Bulwahn and Nicolai Schaffroth, TUM Spec_Check: A Quickcheck tool for Isabelle/ML. * April 2013: Stefan Berghofer, secunet Security Networks AG Dmitriy Traytel, TUM Makarius Wenzel, Université Paris-Sud / LRI Case translations as a separate check phase independent of the datatype package. * March 2013: Florian Haftmann, TUM Reform of "big operators" on sets. * March 2013: Florian Haftmann, TUM Algebraic locale hierarchy for orderings and (semi)lattices. * February 2013: Florian Haftmann, TUM Reworking and consolidation of code generation for target language numerals. * February 2013: Florian Haftmann, TUM Sieve of Eratosthenes. Contributions to Isabelle2013 ----------------------------- * 2012: Makarius Wenzel, Université Paris-Sud / LRI Improvements of Isabelle/Scala and Isabelle/jEdit Prover IDE. * Fall 2012: Daniel Kühlwein, ICIS, Radboud University Nijmegen Jasmin Blanchette, TUM Implemented Machine Learning for Sledgehammer (MaSh). * Fall 2012: Steffen Smolka, TUM Various improvements to Sledgehammer's Isar proof generator, including a smart type annotation algorithm and proof shrinking. * December 2012: Alessandro Coglio, Kestrel Contributions to HOL's Lattice library. * November 2012: Fabian Immler, TUM "Symbols" dockable for Isabelle/jEdit. * November 2012: Fabian Immler, TUM Proof of the Daniell-Kolmogorov theorem: the existence of the limit of projective families. * October 2012: Andreas Lochbihler, KIT Efficient construction of red-black trees from sorted associative lists. * September 2012: Florian Haftmann, TUM Lattice instances for type option. * September 2012: Christian Sternagel, JAIST Consolidated HOL/Library (theories: Prefix_Order, Sublist, and Sublist_Order) w.r.t. prefixes, suffixes, and embedding on lists. * August 2012: Dmitriy Traytel, Andrei Popescu, Jasmin Blanchette, TUM New BNF-based (co)datatype package. * August 2012: Andrei Popescu and Dmitriy Traytel, TUM Theories of ordinals and cardinals. * July 2012: Makarius Wenzel, Université Paris-Sud / LRI Advanced support for Isabelle sessions and build management, notably "isabelle build". * June 2012: Felix Kuperjans, Lukas Bulwahn, TUM and Rafal Kolanski, NICTA Simproc for rewriting set comprehensions into pointfree expressions. * May 2012: Andreas Lochbihler, KIT Theory of almost everywhere constant functions. * 2010-2012: Markus Kaiser and Lukas Bulwahn, TUM Graphview in Scala/Swing. Contributions to Isabelle2012 ----------------------------- * April 2012: Johannes Hölzl, TUM Probability: Introduced type to represent measures instead of locales. * April 2012: Johannes Hölzl, Fabian Immler, TUM Float: Moved to dyadic rationals to represent floating point numbers.
* April 2012: Thomas Sewell, NICTA and 2010: Sascha Boehme, TUM Theory HOL/Word/WordBitwise: logic/circuit expansion of bitvector equalities/inequalities. * March 2012: Christian Sternagel, JAIST Consolidated theory of relation composition. * March 2012: Nik Sultana, University of Cambridge HOL/TPTP parser and import facilities. * March 2012: Cezary Kaliszyk, University of Innsbruck and Alexander Krauss, QAware GmbH Faster and more scalable Import mechanism for HOL Light proofs. * January 2012: Florian Haftmann, TUM, et al. (Re-)Introduction of the "set" type constructor. * 2012: Ondrej Kuncar, TUM New package Lifting, various improvements and refinements to the Quotient package. * 2011/2012: Jasmin Blanchette, TUM Various improvements to Sledgehammer, notably: tighter integration with SPASS, support for more provers (Alt-Ergo, iProver, iProver-Eq). * 2011/2012: Makarius Wenzel, Université Paris-Sud / LRI Various refinements of local theory infrastructure. Improvements of Isabelle/Scala layer and Isabelle/jEdit Prover IDE. Contributions to Isabelle2011-1 ------------------------------- * September 2011: Peter Gammie Theory HOL/Library/Saturated: numbers with saturated arithmetic. * August 2011: Florian Haftmann, Johannes Hölzl and Lars Noschinski, TUM Refined theory on complete lattices. * August 2011: Brian Huffman, Portland State University Miscellaneous cleanup of Complex_Main and Multivariate_Analysis. * June 2011: Brian Huffman, Portland State University Proof method "countable_datatype" for theory Library/Countable. * 2011: Jasmin Blanchette, TUM Various improvements to Sledgehammer, notably: use of sound translations, support for more provers (Waldmeister, LEO-II, Satallax). Further development of Nitpick and 'try' command. * 2011: Andreas Lochbihler, Karlsruhe Institute of Technology Theory HOL/Library/Cset_Monad allows do notation for computable sets (cset) via the generic monad ad-hoc overloading facility. * 2011: Johannes Hölzl, Armin Heller, TUM and Bogdan Grechuk, University of Edinburgh Theory HOL/Library/Extended_Reals: real numbers extended with plus and minus infinity. * 2011: Makarius Wenzel, Université Paris-Sud / LRI Various building blocks for Isabelle/Scala layer and Isabelle/jEdit Prover IDE. Contributions to Isabelle2011 ----------------------------- * January 2011: Stefan Berghofer, secunet Security Networks AG HOL-SPARK: an interactive prover back-end for SPARK. * October 2010: Bogdan Grechuk, University of Edinburgh Extended convex analysis in Multivariate Analysis. * October 2010: Dmitriy Traytel, TUM Coercive subtyping via subtype constraints. * October 2010: Alexander Krauss, TUM Command partial_function for function definitions based on complete partial orders in HOL. * September 2010: Florian Haftmann, TUM Refined concepts for evaluation, i.e., normalization of terms using different techniques. * September 2010: Florian Haftmann, TUM Code generation for Scala. * August 2010: Johannes Hoelzl, Armin Heller, and Robert Himmelmann, TUM Improved Probability theory in HOL. * July 2010: Florian Haftmann, TUM Reworking and extension of the Imperative HOL framework. * July 2010: Alexander Krauss, TUM and Christian Sternagel, University of Innsbruck Ad-hoc overloading. Generic do notation for monads. Contributions to Isabelle2009-2 ------------------------------- * 2009/2010: Stefan Berghofer, Alexander Krauss, and Andreas Schropp, TUM, Makarius Wenzel, TUM / LRI Elimination of type classes from proof terms. 
* April 2010: Florian Haftmann, TUM Reorganization of abstract algebra type classes. * April 2010: Florian Haftmann, TUM Code generation for data representations involving invariants; various collections available in theories Fset, Dlist, RBT, Mapping and AssocList. * March 2010: Sascha Boehme, TUM Efficient SHA1 library for Poly/ML. * February 2010: Cezary Kaliszyk and Christian Urban, TUM Quotient type package for Isabelle/HOL. Contributions to Isabelle2009-1 ------------------------------- * November 2009: Brian Huffman, PSU New definitional domain package for HOLCF. * November 2009: Robert Himmelmann, TUM Derivation and Brouwer's fixpoint theorem in Multivariate Analysis. * November 2009: Stefan Berghofer and Lukas Bulwahn, TUM A tabled implementation of the reflexive transitive closure. * November 2009: Lukas Bulwahn, TUM Predicate Compiler: a compiler for inductive predicates to equational specifications. * November 2009: Sascha Boehme, TUM and Burkhart Wolff, LRI Paris HOL-Boogie: an interactive prover back-end for Boogie and VCC. * October 2009: Jasmin Blanchette, TUM Nitpick: yet another counterexample generator for Isabelle/HOL. * October 2009: Sascha Boehme, TUM Extension of SMT method: proof-reconstruction for the SMT solver Z3. * October 2009: Florian Haftmann, TUM Refinement of parts of the HOL datatype package. * October 2009: Florian Haftmann, TUM Generic term styles for term antiquotations. * September 2009: Thomas Sewell, NICTA More efficient HOL/record implementation. * September 2009: Sascha Boehme, TUM SMT method using external SMT solvers. * September 2009: Florian Haftmann, TUM Refinement of sets and lattices. * July 2009: Jeremy Avigad and Amine Chaieb New number theory. * July 2009: Philipp Meyer, TUM HOL/Library/Sum_Of_Squares: functionality to call a remote csdp prover. * July 2009: Florian Haftmann, TUM New quickcheck implementation using new code generator. * July 2009: Florian Haftmann, TUM HOL/Library/Fset: an explicit type of sets; finite sets ready to use for code generation. * June 2009: Florian Haftmann, TUM HOL/Library/Tree: search trees implementing mappings, ready to use for code generation. * March 2009: Philipp Meyer, TUM Minimization tool for results from Sledgehammer. Contributions to Isabelle2009 ----------------------------- * March 2009: Robert Himmelmann, TUM and Amine Chaieb, University of Cambridge Elementary topology in Euclidean space. * March 2009: Johannes Hoelzl, TUM Method "approximation", which proves real-valued inequalities by computation. * February 2009: Filip Maric, Univ. of Belgrade A Serbian theory. * February 2009: Jasmin Christian Blanchette, TUM Misc cleanup of HOL/refute. * February 2009: Timothy Bourke, NICTA New find_consts command. * February 2009: Timothy Bourke, NICTA "solves" criterion for find_theorems and auto_solve option. * December 2008: Clemens Ballarin, TUM New locale implementation. * December 2008: Armin Heller, TUM and Alexander Krauss, TUM Method "sizechange" for advanced termination proofs. * November 2008: Timothy Bourke, NICTA Performance improvement (factor 50) for find_theorems. * 2008: Florian Haftmann, TUM Various extensions and restructurings in HOL, improvements in evaluation mechanisms, new module binding.ML for name bindings. * October 2008: Fabian Immler, TUM ATP manager for Sledgehammer, based on ML threads instead of Posix processes. Additional ATP wrappers, including remote SystemOnTPTP services. * September 2008: Stefan Berghofer, TUM and Marc Bezem, Univ. Bergen Prover for coherent logic.
* August 2008: Fabian Immler, TUM Vampire wrapper script for remote SystemOnTPTP service. Contributions to Isabelle2008 ----------------------------- * 2007/2008: Alexander Krauss, TUM and Florian Haftmann, TUM and Stefan Berghofer, TUM HOL library improvements. * 2007/2008: Brian Huffman, PSU HOLCF library improvements. * 2007/2008: Stefan Berghofer, TUM HOL-Nominal package improvements. * March 2008: Markus Reiter, TUM HOL/Library/RBT: red-black trees. * February 2008: Alexander Krauss, TUM and Florian Haftmann, TUM and Lukas Bulwahn, TUM and John Matthews, Galois: HOL/Library/Imperative_HOL: Haskell-style imperative data structures for HOL. * December 2007: Norbert Schirmer, Uni Saarbruecken Misc improvements of record package in HOL. * December 2007: Florian Haftmann, TUM Overloading and class instantiation target. * December 2007: Florian Haftmann, TUM New version of primrec package for local theories. * December 2007: Alexander Krauss, TUM Method "induction_scheme" in HOL. * November 2007: Peter Lammich, Uni Muenster HOL-Lattice: some more lemmas. Contributions to Isabelle2007 ----------------------------- * October 2007: Norbert Schirmer, TUM / Uni Saarbruecken State Spaces: The Locale Way (in HOL). * October 2007: Mark A. Hillebrand, DFKI Robust sub/superscripts in LaTeX document output. * August 2007: Jeremy Dawson, NICTA and Paul Graunke, Galois and Brian Huffman, PSU and Gerwin Klein, NICTA and John Matthews, Galois HOL-Word: a library for fixed-size machine words in Isabelle. * August 2007: Brian Huffman, PSU HOL/Library/Boolean_Algebra and HOL/Library/Numeral_Type. * June 2007: Amine Chaieb, TUM Semiring normalization and Groebner Bases. Support for dense linear orders. * June 2007: Joe Hurd, Oxford Metis theorem-prover. * 2007: Kong W. Susanto, Cambridge HOL: Metis prover integration. * 2007: Stefan Berghofer, TUM HOL: inductive predicates and sets. * 2007: Norbert Schirmer, TUM HOL/record: misc improvements. * 2006/2007: Alexander Krauss, TUM HOL: function package and related theories on termination. * 2006/2007: Florian Haftmann, TUM Pure: generic code generator framework. Pure: class package. HOL: theory reorganization, code generator setup. * 2006/2007: Christian Urban, TUM and Stefan Berghofer, TUM and Julien Narboux, TUM HOL/Nominal package and related tools. * November 2006: Lukas Bulwahn, TUM HOL: method "lexicographic_order" for function package. * October 2006: Stefan Hohe, TUM HOL-Algebra: ideals and quotients over rings. * August 2006: Amine Chaieb, TUM Experimental support for generic reflection and reification in HOL. * July 2006: Rafal Kolanski, NICTA Hex (0xFF) and binary (0b1011) numerals. * May 2006: Klaus Aehlig, LMU Command 'normal_form': normalization by evaluation. * May 2006: Amine Chaieb, TUM HOL-Complex: Ferrante and Rackoff Algorithm for linear real arithmetic. * February 2006: Benjamin Porter, NICTA HOL and HOL-Complex: generalised mean value theorem, continuum is not denumerable, harmonic and arithmetic series, and denumerability of rationals. * October 2005: Martin Wildmoser, TUM Sketch for Isar 'guess' element. Contributions to Isabelle2005 ----------------------------- * September 2005: Lukas Bulwahn and Bernhard Haeupler, TUM HOL-Complex: Formalization of Taylor series. * September 2005: Stephan Merz, Alwen Tiu, QSL Loria Components for SAT solver method using zChaff. * September 2005: Ning Zhang and Christian Urban, LMU Munich A Chinese theory. 
* September 2005: Bernhard Haeupler, TUM
  Method comm_ring for proving equalities in commutative rings.

* July/August 2005: Jeremy Avigad, Carnegie Mellon University
  Various improvements of the HOL and HOL-Complex library.

* July 2005: Florian Zuleger, Johannes Hoelzl, and Simon Funke, TUM
  Some structured proofs about completeness of real numbers.

* May 2005: Rafal Kolanski and Gerwin Klein, NICTA
  Improved retrieval of facts from theory/proof context.

* February 2005: Lucas Dixon, University of Edinburgh
  Improved subst method.

* 2005: Brian Huffman, OGI
  Various improvements of HOLCF. Some improvements of the HOL-Complex
  library.

* 2005: Claire Quigley and Jia Meng, University of Cambridge
  Some support for asynchronous communication with external provers
  (experimental).

* 2005: Florian Haftmann, TUM
  Contributions to document 'sugar'. Various ML combinators, notably linear
  functional transformations. Some cleanup of ML legacy. Additional
  antiquotations. Improved Isabelle web site.

* 2004/2005: David Aspinall, University of Edinburgh
  Various elements of XML and PGIP based communication with user interfaces
  (experimental).

* 2004/2005: Gerwin Klein, NICTA
  Contributions to document 'sugar'. Improved Isabelle web site. Improved
  HTML presentation of theories.

* 2004/2005: Clemens Ballarin, TUM
  Provers: tools for transitive relations and quasi orders. Improved version
  of locales, notably interpretation of locales. Improved version of
  HOL-Algebra.

* 2004/2005: Amine Chaieb, TUM
  Improved version of HOL presburger method.

* 2004/2005: Steven Obua, TUM
  Improved version of HOL/Import, support for HOL-Light. Improved version of
  HOL-Complex-Matrix. Pure/defs: more sophisticated checks on
  well-formedness of overloading. Pure/Tools: an experimental evaluator for
  lambda terms.

* 2004/2005: Norbert Schirmer, TUM
  Contributions to document 'sugar'. Improved version of HOL/record.

* 2004/2005: Sebastian Skalberg, TUM
  SAT solver method using zChaff. Improved version of HOL/Import. Some
  internal ML reorganizations.

* 2004/2005: Tjark Weber, TUM
  SAT solver method using zChaff. Improved version of HOL/refute.

:maxLineLen=78:

diff --git a/NEWS b/NEWS
--- a/NEWS
+++ b/NEWS
@@ -1,16781 +1,16812 @@
Isabelle NEWS -- history of user-relevant changes
=================================================

(Note: Isabelle/jEdit shows a tree-view of the NEWS file in Sidekick.)

+New in this Isabelle version
+----------------------------
+
+*** General ***
+
+* Old-style {* verbatim *} tokens have been discontinued (legacy feature
+since Isabelle2019). INCOMPATIBILITY, use \cartouche\ syntax instead.
+
+
+*** HOL ***
+
+* Theory "HOL.Relation": Added lemmas asymp_less and asymp_greater to
+  type class preorder.
+
+* Theory "HOL-Library.Multiset":
+  - Consolidated operation and fact names.
+      multp ~> multp_code
+      multeqp ~> multeqp_code
+      multp_cancel_add_mset ~> multp_cancel_add_mset0
+      multp_cancel_add_mset0[simplified] ~> multp_cancel_add_mset
+      multp_code_iff ~> multp_code_iff_mult
+      multeqp_code_iff ~> multeqp_code_iff_reflcl_mult
+    Minor INCOMPATIBILITY.
+  - Moved mult1_lessE out of the preorder type class and added an explicit
+    assumption. Minor INCOMPATIBILITY.
+  - Added predicate multp, equivalent to the set-based mult. Reused the
+    name previously used for what is now called multp_code. Minor
+    INCOMPATIBILITY.
+  - Lifted multiple lemmas from mult to multp.
+  - Redefined less_multiset to be based on multp. INCOMPATIBILITY.
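+
+    For example, the intended connection between multp and the set-based
+    mult can be stated like this (a sketch, assuming multp is literally
+    defined via mult as described above):
+
+      lemma "multp R M N \<longleftrightarrow> (M, N) \<in> mult {(x, y). R x y}"
+        by (simp add: multp_def)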
+
+

New in Isabelle2021-1 (December 2021)
-------------------------------------

*** General ***

* The Isabelle/Haskell library ($ISABELLE_HOME/src/Tools/Haskell) has been significantly improved. In particular, module Isabelle.Bytes provides type Bytes for light-weight byte strings (with optional UTF8 interpretation), similar to type string in Isabelle/ML. Isabelle symbols now work uniformly in Isabelle/Haskell vs. Isabelle/ML vs. Isabelle/Scala/PIDE.

* Configuration option "show_results" controls output of final results in commands like 'definition' or 'theorem'. Output is normally enabled in interactive mode, but it could occasionally cause unnecessary slowdown. It can be disabled like this:

  context notes [[show_results = false]]
  begin
  definition "test = True"
  theorem test by (simp add: test_def)
  end

* Theory_Data / Generic_Data: "val extend = I" has been removed; obsolete since Isabelle2021.

* More symbol definitions for the Z Notation (Isabelle fonts and LaTeX). See also the group "Z Notation" in the Symbols dockable of Isabelle/jEdit.


*** Isar ***

* Commands 'syntax' and 'no_syntax' now work in a local theory context, but in contrast to 'notation' and 'no_notation' there is no proper way to refer to local entities. Note that local syntax works well with 'bundle', e.g. see "lattice_syntax" vs. "no_lattice_syntax" in theory Main of Isabelle/HOL.

* The improper proof command 'guess' is no longer part of Pure, but provided by the separate theory "Pure-ex.Guess". INCOMPATIBILITY, existing applications need to import session "Pure-ex" and theory "Pure-ex.Guess". Afterwards it is usually better to eliminate the 'guess' command, using explicit 'obtain' instead.

* More robust 'proof' outline for method "induct": support nested cases.


*** Isabelle/jEdit Prover IDE ***

* The main plugin for Isabelle/jEdit can be deactivated and reactivated as documented --- was broken at least since Isabelle2018.

* Isabelle/jEdit is now composed more conventionally from the original jEdit text editor in $JEDIT_HOME (with minor patches), plus two Isabelle plugins that are produced in $JEDIT_SETTINGS/jars on demand. The main isabelle.jedit module is now part of Isabelle/Scala (as one big $ISABELLE_SCALA_JAR).

* Add-on components may provide their own jEdit plugins, via the new Scala/Java module concept: instances of class isabelle.Scala_Project.Plugin that are declared as "services" within etc/build.props are activated on Isabelle/jEdit startup. E.g. see existing isabelle.jedit.JEdit_Plugin0 (for isabelle_jedit_base.jar) and isabelle.jedit.JEdit_Plugin1 (for isabelle_jedit_main.jar).

* Support for built-in font substitution of jEdit text area.


*** Document preparation ***

* HTML presentation now includes links to formal entities.

* High-quality blackboard-bold symbols from font "txmia" (LaTeX package "pxfonts"): \\\\\\\\\\\\\\\\\\\\\\\\\\.

* More predefined symbols: \ \ \ (package "stmaryrd"), \ \ (LaTeX package "pifont").

* Document antiquotations for ML text have been refined: "def" and "ref" variants support index entries, e.g. @{ML} (no entry) vs. @{ML_def} (bold entry) vs. @{ML_ref} (regular entry); @{ML_type} supports explicit type arguments for constructors (only relevant for index), e.g. @{ML_type \'a list\} vs. @{ML_type 'a \list\}; @{ML_op} has been renamed to @{ML_infix}. Minor INCOMPATIBILITY concerning name and syntax.

* Option "document_logo" determines if an instance of the Isabelle logo should be created in the document output directory.
The given string specifies the name of the logo variant, while "_" (underscore) refers to the unnamed variant. The output file name is always "isabelle_logo.pdf".

* Option "document_build" determines the document build engine, as defined in Isabelle/Scala (as system service). The following engines are provided by the Isabelle distribution:

  - "lualatex" (default): use ISABELLE_LUALATEX for a standard LaTeX build with optional ISABELLE_BIBTEX and ISABELLE_MAKEINDEX
  - "pdflatex": as above, but use ISABELLE_PDFLATEX (legacy mode for special LaTeX styles)
  - "build": delegate to the executable "./build pdf"

  The presence of a "build" command within the document output directory explicitly requires document_build=build. Minor INCOMPATIBILITY, need to adjust session ROOT options.

* Option "document_comment_latex" enables regular LaTeX comment.sty, instead of the historic version for plain TeX (default). The latter is much faster, but in conflict with LaTeX classes like Dagstuhl LIPIcs.

* Option "document_echo" informs about document file names during session presentation.

* Option "document_heading_prefix" specifies a prefix for the LaTeX macro names generated from document heading commands like 'chapter', 'section' etc. The default is "isamarkup", so 'section' becomes "\isamarkupsection" for example.

* The command-line tool "isabelle latex" has been discontinued, INCOMPATIBILITY for old document build scripts.

  - Former "isabelle latex -o sty" has become obsolete: Isabelle .sty files are automatically generated within the document output directory.
  - Former "isabelle latex -o pdf" should be replaced by "$ISABELLE_LUALATEX root" or "$ISABELLE_PDFLATEX root" (without quotes), according to the intended LaTeX engine.
  - Former "isabelle latex -o bbl" should be replaced by "$ISABELLE_BIBTEX root" (without quotes).
  - Former "isabelle latex -o idx" should be replaced by "$ISABELLE_MAKEINDEX root" (without quotes).

* Option "document_bibliography" explicitly enables the use of bibtex; the default is to check the presence of root.bib, but it could have a different name.

* Improved LaTeX typesetting of \...\ using \guilsinglleft ... \guilsinglright. INCOMPATIBILITY, need to use \usepackage[T1]{fontenc} (which is now also the default in "isabelle mkroot").

* Simplified typesetting of \...\ using \guillemotleft ... \guillemotright from \usepackage[T1]{fontenc} --- \usepackage{babel} is no longer required.


*** Pure ***

* "global_interpretation" is applicable in instantiation and overloading targets and in any nested target built on top of a target supporting "global_interpretation".


*** HOL ***

* New order prover.

* Theorems "antisym" and "eq_iff" in class "order" have been renamed to "order.antisym" and "order.eq_iff", to coexist locally with "antisym" and "eq_iff" from locale "ordering". INCOMPATIBILITY: significant potential for change can be avoided if interpretations of type class "order" are replaced or augmented by interpretations of locale "ordering".

* Theorem "swap_def" is now always qualified as "Fun.swap_def". Minor INCOMPATIBILITY; note that for most applications less elementary lemmas exist.

* Lemma "permutes_induct" has been given stronger hypotheses and named premises. INCOMPATIBILITY.

* Combinator "Fun.swap" resolved into a mere input abbreviation in separate theory "Transposition" in HOL-Combinatorics. INCOMPATIBILITY.

* Theory Bit_Operations is now part of HOL-Main. Minor INCOMPATIBILITY.

* Infix syntax for bit operations AND, OR, XOR, NOT is now organized in bundle bit_operations_syntax.
INCOMPATIBILITY.

* Bit operations set_bit, unset_bit and flip_bit are now class operations. INCOMPATIBILITY.

* Simplified class hierarchy for bit operations: bit operations reside in classes (semi)ring_bit_operations, class semiring_bit_shifts is gone.

* Consecutive conversions to and from words are not collapsed in any case: rules unsigned_of_nat, unsigned_of_int, signed_of_int, signed_of_nat, word_of_nat_eq_0_iff, word_of_int_eq_0_iff are not simp by default any longer. INCOMPATIBILITY.

* Abbreviation "max_word" has been moved to session Word_Lib in the AFP, as have the constants "shiftl1", "shiftr1", "sshiftr1", "bshiftr1", "setBit", "clearBit". See also the changelog in theory Guide there. INCOMPATIBILITY.

* Reorganized classes and locales for boolean algebras. INCOMPATIBILITY.

* New simp rules: less_exp, min.absorb1, min.absorb2, min.absorb3, min.absorb4, max.absorb1, max.absorb2, max.absorb3, max.absorb4. Minor INCOMPATIBILITY.

* The Mirabelle testing tool is now part of Main HOL, and accessible via the command-line tool "isabelle mirabelle" (implemented in Isabelle/Scala). It has become more robust and supports parallelism within Isabelle/ML.

* Nitpick: External solver "MiniSat" is available for all supported Isabelle platforms (including 64bit Windows and ARM), while "MiniSat_JNI" only works for Intel Linux and macOS.

* Nitpick/Kodkod: default is back to external Java process (option kodkod_scala = false), both for PIDE and batch builds. This reduces confusion and increases robustness of timeouts, despite substantial overhead to run an external JVM. For more fine-grained control, the kodkod_scala option can be modified within the formal theory context like this:

  declare [[kodkod_scala = false]]

* Sledgehammer:
  - Update of bundled provers:
      . E 2.6
      . Vampire 4.6 (with Open Source license)
      . veriT 2021.06.1-rmx
      . Zipperposition 2.1
      . Z3 4.4.1 for arm64-linux, which approximates Z3 4.4.0pre, but sometimes fails or crashes
  - Adjusted default provers: cvc4 vampire verit e spass z3 zipperposition
  - Adjusted Zipperposition's slicing.
  - Removed legacy "lam_lifting" (synonym for "lifting") from option "lam_trans". Minor INCOMPATIBILITY.
  - Renamed "hide_lams" to "opaque_lifting" in option "lam_trans". Minor INCOMPATIBILITY.
  - Added "opaque_combs" to option "lam_trans": lambda expressions are rewritten using combinators, but the combinators are kept opaque, i.e. without definitions.

* Metis:
  - Renamed option "hide_lams" to "opaque_lifting". Minor INCOMPATIBILITY.
  - Updated the Metis prover underlying the "metis" proof method to version 2.4 (release 20200713). The new version fixes one implementation defect. Very slight INCOMPATIBILITY.

* Theory HOL-Library.Lattice_Syntax has been superseded by bundle "lattice_syntax": it can be used in a local context via 'include' or in a global theory via 'unbundle'. The opposite declarations are bundled as "no_lattice_syntax". Minor INCOMPATIBILITY; see the usage sketch below.

* Theory "HOL-Library.Multiset": dedicated predicate "multiset" is gone, use explicit expression instead. Minor INCOMPATIBILITY.

* Theory "HOL-Library.Multiset": consolidated abbreviations Mempty, Melem, not_Melem to empty_mset, member_mset, not_member_mset respectively. Minor INCOMPATIBILITY.
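  For example, the "lattice_syntax" bundle mentioned above can be used like
  this (a minimal sketch; the concluding proof is just one possibility):

    context
      includes lattice_syntax
    begin

    lemma "x \<sqinter> y \<le> x \<squnion> y" for x y :: "'a::lattice"
      by (meson inf.cobounded1 sup.cobounded1 order_trans)

    end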
* Theory "HOL-Library.Multiset": consolidated operation and fact names: inf_subset_mset ~> inter_mset sup_subset_mset ~> union_mset multiset_inter_def ~> inter_mset_def sup_subset_mset_def ~> union_mset_def multiset_inter_count ~> count_inter_mset sup_subset_mset_count ~> count_union_mset * Theory "HOL-Library.Complex_Order": Defines less, less_eq on complex numbers. Not imported by default. * Theory "HOL-Library.Multiset": syntax precendence for membership operations has been adjusted to match the corresponding precendences on sets. Rare INCOMPATIBILITY. * Theory "HOL-Library.Cardinality": code generator setup based on the type classes finite_UNIV and card_UNIV has been moved to "HOL-Library.Code_Cardinality", to avoid incompatibilities with other code setups for sets in AFP/Containers. Applications relying on this code setup should import "HOL-Library.Code_Cardinality". Minor INCOMPATIBILITY. * Theory "HOL-Library.Permutation" has been renamed to the more specific "HOL-Library.List_Permutation". Note that most notions from that theory are already present in theory "HOL-Combinatorics.Permutations". INCOMPATIBILITY. * Dedicated session "HOL-Combinatorics". INCOMPATIBILITY: theories "Permutations", "List_Permutation" (formerly "Permutation"), "Stirling", "Multiset_Permutations", "Perm" have been moved there from session HOL-Library. * Theory "HOL-Combinatorics.Transposition" provides elementary swap operation "transpose". * Theory "HOL-Analysis.Infinite_Sum": new theory for infinite sums with a more general definition than the existing theory Infinite_Set_Sum. (Infinite_Set_Sum contains theorems relating the two definitions.) * Theory "HOL-Analysis.Product_Vector": Instantiation of the product of uniform spaces as a uniform space. Minor INCOMPATIBILITY: the old definition "uniformity_prod_def" is available as a derived fact "uniformity_dist". * Session "HOL-Analysis" and "HOL-Probability": indexed products of discrete distributions, negative binomial distribution, Hoeffding's inequality, Chernoff bounds, Cauchy–Schwarz inequality for nn_integral, and some more small lemmas. Some theorems that were stated awkwardly before were corrected. Minor INCOMPATIBILITY. * Session "HOL-Analysis": the complex Arg function has been identified with the function "arg" of Complex_Main, renaming arg ~> Arg also in the names of arg_bounded. Minor INCOMPATIBILITY. * Session "HOL-Statespace": various improvements and cleanup. *** ML *** * External bash processes are always managed by Isabelle/Scala, in contrast to Isabelle2021 where this was only done for macOS on Apple Silicon. The main Isabelle/ML interface is Isabelle_System.bash_process with result type Process_Result.T (resembling class Process_Result in Scala); derived operations Isabelle_System.bash and Isabelle_System.bash_output provide similar functionality as before. The underlying TCP/IP server within Isabelle/Scala is available to other programming languages as well, notably Isabelle/Haskell. Rare INCOMPATIBILITY due to subtle semantic differences: - Processes invoked from Isabelle/ML actually run in the context of the Java VM of Isabelle/Scala. The settings environment and current working directory are usually the same on both sides, but there can be subtle corner cases (e.g. unexpected uses of "cd" or "putenv" in ML). - Output via stdout and stderr is line-oriented: Unix vs. Windows line-endings are normalized towards Unix; presence or absence of a final newline is irrelevant. 
    The original lines are available as Process_Result.out_lines/err_lines; the concatenated versions Process_Result.out/err *omit* a trailing newline (using Library.trim_line, which was occasionally seen in applications before, but is no longer necessary).

  - Output needs to be plain text encoded in UTF-8: Isabelle/Scala recodes it temporarily as UTF-16. This works for well-formed Unicode text, but not for arbitrary byte strings. In such cases, the bash script should write temporary files, managed by Isabelle/ML operations like Isabelle_System.with_tmp_file to create a file name and File.read to retrieve its content.

  - The Isabelle/Scala "bash_process" server requires a PIDE session context. This could be a regular batch session (e.g. "isabelle build"), a PIDE editor session (e.g. "isabelle jedit"), or headless PIDE (e.g. "isabelle dump" or "isabelle server"). Note that old "isabelle console" or raw "isabelle process" don't have that.

  New Process_Result.timing works as in Isabelle/Scala, based on direct measurements of the bash_process wrapper in C: elapsed time is always available, CPU time is only available on Linux and macOS, GC time is unavailable.

* The following Isabelle/ML system operations are run in the context of Isabelle/Scala, within a PIDE session context:

  - Isabelle_System.make_directory
  - Isabelle_System.copy_dir
  - Isabelle_System.copy_file
  - Isabelle_System.copy_base_file
  - Isabelle_System.rm_tree
  - Isabelle_System.download

* Term operations under abstractions are now more robust (and more strict) by using the formal proof context in subsequent operations:

    Variable.dest_abs
    Variable.dest_abs_cterm
    Variable.dest_all
    Variable.dest_all_cterm

  This works under the assumption that terms are always properly declared to the proof context (e.g. via Variable.declare_term). Failure to do so, or working with the wrong context, will cause an error (exception Fail, based on Term.USED_FREE from Term.dest_abs_fresh). The Simplifier and equational conversions now use the above operations routinely, and thus require user-space tools to be serious about the proof context (notably in their use of Goal.prove, SUBPROOF etc.). INCOMPATIBILITY in add-on tools is to be expected occasionally: a proper context discipline needs to be followed.

* Former operations Term.dest_abs and Logic.dest_all (without a proper context) have been discontinued. INCOMPATIBILITY, either use Variable.dest_abs etc. above, or the following operations that imitate the old behavior to a great extent:

    Term.dest_abs_global
    Logic.dest_all_global

  This works under the assumption that the given (sub-)term directly shows all free variables that need to be avoided when generating a fresh name. A typical violation of that assumption are variables stemming from the enclosing context that get involved in a proof only later.

* ML structures TFrees, TVars, Frees, Vars, Names provide scalable operations to accumulate items from types and terms, using a fast syntactic order. The original order of occurrences may be recovered as well, e.g. via TFrees.list_set.

* Thm.instantiate, Thm.generalize and related operations (e.g. Variable.import) now use scalable data structures from structure TVars, Vars, Names etc. INCOMPATIBILITY: e.g. use TVars.empty and TVars.make for immediate adoption; better use TVars.add, TVars.add_tfrees etc. for scalable accumulation of items (see also the sketch below).

* Thm.instantiate_beta applies newly emerging abstractions to their arguments in the term, but leaves other beta-redexes unchanged --- in contrast to Drule.instantiate_normalize.
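  For example, an immediate (non-scalable) use of TVars.make / Vars.make
  with Thm.instantiate could look like this (a sketch: "th" is a
  hypothetical theorem with schematic ?'a and ?x :: nat):

    (* instantiate ?'a := nat and ?x := Suc 0 in hypothetical th *)
    val th' =
      Thm.instantiate
        (TVars.make [((("'a", 0), \<^sort>\<open>type\<close>), \<^ctyp>\<open>nat\<close>)],
         Vars.make [((("x", 0), \<^typ>\<open>nat\<close>), \<^cterm>\<open>Suc 0\<close>)]) th;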
* ML antiquotation "instantiate" allows to instantiate formal entities (types, terms, theorems) with values given ML. This works uniformly for "typ", "term", "prop", "ctyp", "cterm", "cprop", "lemma" --- given as a keyword after the instantiation. A mode "(schematic)" behind the keyword means that some variables may remain uninstantiated (fixed in the specification and schematic in the result); by default, all variables need to be instantiated. Newly emerging abstractions are applied to their arguments in the term (using Thm.instantiate_beta). Examples in HOL: fun make_assoc_type (A, B) = \<^instantiate>\'a = A and 'b = B in typ \('a \ 'b) list\\; val make_assoc_list = map (fn (x, y) => \<^instantiate>\'a = \fastype_of x\ and 'b = \fastype_of y\ and x and y in term \(x, y)\ for x :: 'a and y :: 'b\); fun symmetry x y = \<^instantiate>\'a = \Thm.ctyp_of_cterm x\ and x and y in lemma \x = y \ y = x\ for x y :: 'a by simp\ fun symmetry_schematic A = \<^instantiate>\'a = A in lemma (schematic) \x = y \ y = x\ for x y :: 'a by simp\ * ML antiquotation for embedded lemma supports local fixes, as usual in many other Isar language elements. For example: @{lemma "x = x" for x :: nat by (rule refl)} * ML antiquotations for type constructors and term constants: \<^Type>\c\ \<^Type>\c T \\ \ \same with type arguments\ \<^Type_fn>\c T \\ \ \fn abstraction, failure via exception TYPE\ \<^Const>\c\ \<^Const>\c T \\ \ \same with type arguments\ \<^Const>\c for t \\ \ \same with term arguments\ \<^Const_>\c \\ \ \same for patterns: case, let, fn\ \<^Const_fn>\c T \\ \ \fn abstraction, failure via exception TERM\ The type/term arguments refer to nested ML source, which may contain antiquotations recursively. The following argument syntax is supported: - an underscore (dummy pattern) - an atomic item of "embedded" syntax, e.g. identifier or cartouche - an antiquotation in control-symbol/cartouche form, e.g. \<^Type>\c\ as short form of \\<^Type>\c\\. Examples in HOL: val natT = \<^Type>\nat\; fun mk_funT (A, B) = \<^Type>\fun A B\; val dest_funT = \<^Type_fn>\fun A B => \(A, B)\\; fun mk_conj (A, B) = \<^Const>\conj for A B\; val dest_conj = \<^Const_fn>\conj for A B => \(A, B)\\; fun mk_eq T (t, u) = \<^Const>\HOL.eq T for t u\; val dest_eq = \<^Const_fn>\HOL.eq T for t u => \(T, (t, u))\\; * ML antiquotations \<^make_judgment> and \<^dest_judgment> refer to corresponding functions for the object-logic of the ML compilation context. This supersedes older mk_Trueprop / dest_Trueprop operations. * The "build" combinators of various data structures help to build content from bottom-up, by applying an "add" function the "empty" value. For example: - type 'a Symtab.table etc.: build - type 'a Names.table etc.: build - type 'a list: build and build_rev - type Buffer.T: build and build_content For example, see src/Pure/PIDE/xml.ML: val content_of = Buffer.build_content o fold add_content; * ML antiquotations \<^try>\expr\ and \<^can>\expr\ operate directly on the given ML expression, in contrast to functions "try" and "can" that modify application of a function. * ML antiquotations for conditional ML text: \<^if_linux>\...\ \<^if_macos>\...\ \<^if_windows>\...\ \<^if_unix>\...\ * ML profiling has been updated and reactivated, after some degration in Isabelle2021: - "isabelle build -o threads=1 -o profiling=..." works properly within the PIDE session context; - "isabelle profiling_report" now uses the session build database (like "isabelle log"); - output uses non-intrusive tracing messages, instead of warnings. 
*** System ***

* Almost complete support for arm64-linux platform. The reference platform is Raspberry Pi 4 with 8 GB RAM running Pi OS (64 bit).

* Update to OpenJDK 17: the current long-term support version of Java.

* Update to Poly/ML 5.9 with improved support for ARM on Linux. On macOS, the Intel version works more smoothly with Rosetta 2, as already used in Isabelle2021. Further changes to Poly/ML are documented here: http://lists.inf.ed.ac.uk/pipermail/polyml/2021-May/002451.html

* Perl is no longer required by Isabelle proper, and no longer provided by specific Isabelle execution environments (Docker, Cygwin on Windows). Minor INCOMPATIBILITY, add-on applications involving perl need to provide it by different means. (Note that proper Isabelle systems programming works via Scala/Java, without perl, python, ruby etc.)

* Each Isabelle component may specify a Scala/Java jar module declaratively via etc/build.props (file names are relative to the component directory). E.g. see $ISABELLE_HOME/etc/build.props with further explanations in the "system" manual.

* Command-line tool "isabelle scala_build" allows to invoke the build process of all Scala/Java modules explicitly. Normally this is done implicitly on demand, e.g. for "isabelle scala" or "isabelle jedit".

* Command-line tool "isabelle scala_project" has been improved in various ways:
  - sources from all components with etc/build.props are included,
  - sources of the jEdit text editor and the Isabelle/jEdit plugins (jedit_base and jedit_main) are included by default,
  - more sources may be given on the command-line,
  - options -f and -D make the tool more convenient,
  - Gradle has been replaced by Maven (less ambitious and more robust).

* Remote provers from SystemOnTPTP (notably for Sledgehammer) are now managed via Isabelle/Scala instead of perl; the dependency on libwww-perl has been eliminated (notably on Linux). Rare INCOMPATIBILITY: HTTP proxy configuration now works via JVM properties https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/net/doc-files/net-properties.html

* System options may declare an implicit standard value, which is used when the option is activated without providing an explicit value, e.g. "isabelle build -o document -o document_output" instead of "isabelle build -o document=true -o document_output=output". For options of type "bool", the standard is always "true" and cannot be specified differently.

* System option "document=true" is an alias for "document=pdf", and "document=false" is an alias for "document=" (empty string).

* System option "system_log" specifies an optional log file for internal messages produced by Output.system_message in Isabelle/ML; the standard value "-" refers to console progress of the build job. This works for "isabelle build" or any derivative of it.

* Command-line tool "isabelle version" supports repository archives (without full .hg directory). It also provides more options.

* Obsolete settings variable ISABELLE_PLATFORM32 has been discontinued. Note that only Windows supports old 32 bit executables, via settings variable ISABELLE_WINDOWS_PLATFORM32. Everything else should be ISABELLE_PLATFORM64 (generic Posix) or ISABELLE_WINDOWS_PLATFORM64 (native Windows) or ISABELLE_APPLE_PLATFORM64 (Apple Silicon).

* Timeouts for Isabelle/ML tools are subject to system option "timeout_scale", to support adjustments to slow machines. Before, timeout_scale was only used for the overall session build process, now it affects the underlying Timeout.apply in Isabelle/ML as well.
It treats a timeout specification 0 as "no timeout", instead of "immediate timeout". Rare INCOMPATIBILITY in boundary cases.



New in Isabelle2021 (February 2021)
-----------------------------------

*** General ***

* On macOS, the IsabelleXYZ.app directory layout now follows the other platforms, without indirection via Contents/Resources/. INCOMPATIBILITY, use e.g. IsabelleXYZ.app/bin/isabelle instead of former IsabelleXYZ.app/Isabelle/bin/isabelle or IsabelleXYZ.app/Isabelle/Contents/Resources/IsabelleXYZ/bin/isabelle.

* HTML presentation uses rich markup produced by Isabelle/PIDE, resulting in more colors and links.

* HTML presentation includes auxiliary files (e.g. ML) for each theory.

* Proof method "subst" is confined to the original subgoal range: its included distinct_subgoals_tac no longer affects unrelated subgoals. Rare INCOMPATIBILITY.

* Theory_Data extend operation is obsolete and needs to be the identity function; merge should be conservative and not reset to the empty value. Subtle INCOMPATIBILITY and change of semantics (due to Theory.join_theory from Isabelle2020). Special extend/merge behaviour at the beginning of a new theory can be achieved via Theory.at_begin.


*** Isabelle/jEdit Prover IDE ***

* Improved GUI look-and-feel: the portable and scalable "FlatLaf Light" is used by default on all platforms (appearance similar to IntelliJ IDEA).

* Improved markup for theory header imports: hyperlinks for theory files work without formal checking of content.

* The prover process can download auxiliary files (e.g. 'ML_file') for theories with remote URL. This requires the external "curl" program.

* Action "isabelle.goto-entity" (shortcut CS+d) jumps to the definition of the formal entity at the caret position.

* The visual feedback on caret entity focus is normally restricted to definitions within the visible text area. The keyboard modifier "CS" overrides this: then all defining and referencing positions are shown. See also option "jedit_focus_modifier".

* The jEdit status line includes widgets both for JVM and ML heap usage. Ongoing ML garbage collection is shown as "ML cleanup".

* The Monitor dockable provides buttons to request a full garbage collection and sharing of live data on the ML heap. It also includes information about the Java Runtime system.

* PIDE support for session ROOTS: markup for directories.

* Update to jedit-5.6.0, the latest release. This version works properly on macOS by default, without the special MacOSX plugin.

* Action "full-screen-mode" (shortcut F11 or S+F11) has been modified for better approximate window size on macOS and Linux/X11.

* Improved GUI support for macOS 11.1 Big Sur: native fullscreen mode, but non-native look-and-feel (FlatLaf).

* Hyperlinks to various file-formats (.pdf, .png, etc.) open an external viewer, instead of re-using the jEdit text editor.

* IDE support for Naproche-SAD: Proof Checking of Natural Mathematical Documents. See also $NAPROCHE_HOME/examples for files with .ftl or .ftl.tex extension. The corresponding Naproche-SAD server process can be disabled by setting the system option naproche_server=false and restarting the Isabelle application.


*** Document preparation ***

* Keyword 'document_theories' within ROOT specifies theories from other sessions that should be included in the generated document source directory. This does not affect the generated session.tex: \input{...} needs to be used separately.

* The standard LaTeX engine is now lualatex, according to settings variable ISABELLE_PDFLATEX.
This is mostly upwards compatible with old pdflatex, but text encoding needs to conform strictly to utf8. Rare INCOMPATIBILITY.

* Discontinued obsolete DVI format and ISABELLE_LATEX settings variable: document output is always PDF.

* Antiquotation @{tool} refers to Isabelle command-line tools, with completion and formal reference to the source (external script or internal Scala function).

* Antiquotation @{bash_function} refers to GNU bash functions that are checked within the Isabelle settings environment.

* Antiquotations @{scala}, @{scala_object}, @{scala_type}, @{scala_method} refer to checked Isabelle/Scala entities.


*** Pure ***

* Session Pure-Examples contains notable examples for Isabelle/Pure (former entries of HOL-Isar_Examples).

* Named contexts (locale and class specifications, locale and class context blocks) allow bundle mixins for the surface context. This allows syntax notations to be organized within bundles conveniently. See theory "HOL-ex.Specifications_with_bundle_mixins" for examples and the isar-ref manual for syntax descriptions.

* Definitions in locales produce a rule which can be added as a congruence rule to protect foundational terms during simplification.

* Consolidated terminology and function signatures for nested targets:

  - Local_Theory.begin_nested replaces Local_Theory.open_target
  - Local_Theory.end_nested replaces Local_Theory.close_target
  - Combination of Local_Theory.begin_nested and Local_Theory.end_nested(_result) replaces Local_Theory.subtarget(_result)

  INCOMPATIBILITY.

* Local_Theory.init replaces Generic_Target.init. Minor INCOMPATIBILITY.


*** HOL ***

* Session HOL-Examples contains notable examples for Isabelle/HOL (former entries of HOL-Isar_Examples, HOL-ex etc.).

* An updated version of the veriT solver is now included as Isabelle component. It can be used in the "smt" proof method via "smt (verit)" or via "declare [[smt_solver = verit]]" in the context; see also session HOL-Word-SMT_Examples and the sketch below.

* Zipperposition 2.0 is now included as Isabelle component for experimentation, e.g. in "sledgehammer [prover = zipperposition]".

* Sledgehammer:
  - support veriT in proof preplay
  - take advantage of more cores in proof preplay

* Updated the Metis prover underlying the "metis" proof method to version 2.4 (release 20180810). The new version fixes one soundness defect and two incompleteness defects. Very slight INCOMPATIBILITY.

* Nitpick/Kodkod may be invoked directly within the running Isabelle/Scala session (instead of an external Java process): this improves reactivity and saves resources. This experimental feature is guarded by system option "kodkod_scala" (default: true in PIDE interaction, false in batch builds).

* Simproc "defined_all" and rewrite rule "subst_all" perform more aggressive substitution with variables from assumptions. INCOMPATIBILITY, consider repairing proofs locally like this:

  supply subst_all [simp del] [[simproc del: defined_all]]

* Simproc "datatype_no_proper_subterm" rewrites equalities "lhs = rhs" on datatypes to "False" if either side is a proper subexpression of the other (for any datatype with a reasonable size function).

* Syntax for state monad combinators fcomp and scomp is organized in bundle state_combinator_syntax. Minor INCOMPATIBILITY.

* Syntax for reflected term syntax is organized in bundle term_syntax, discontinuing previous locale term_syntax. Minor INCOMPATIBILITY.

* New constant "power_int" for exponentiation with integer exponent, written as "x powi n".

* Added the "at most 1" quantifier, Uniq.
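  For example, the veriT-based smt invocation mentioned above can be used
  like this (a minimal sketch; any goal within the solver's reach will do):

    lemma "(x::int) + y = y + x"
      by (smt (verit))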
* For the natural numbers, "Sup {} = 0". * New constant semiring_char gives the characteristic of any type of class semiring_1, with the convenient notation CHAR('a). For example, CHAR(nat) = CHAR(int) = CHAR(real) = 0, CHAR(17) = 17. * HOL-Computational_Algebra.Polynomial: Definition and basic properties of algebraic integers. * Library theory "Bit_Operations" with generic bit operations. * Library theory "Signed_Division" provides operations for signed division, instantiated for type int. * Theory "Multiset": removed misleading notation \# for sum_mset; replaced with \\<^sub>#. Analogous notation for prod_mset also exists now. * New theory "HOL-Library.Word" takes over material from former session "HOL-Word". INCOMPATIBILITY: need to adjust imports. * Theory "HOL-Library.Word": Type word is restricted to bit strings consisting of at least one bit. INCOMPATIBILITY. * Theory "HOL-Library.Word": Bit operations NOT, AND, OR, XOR are based on generic algebraic bit operations from theory "HOL-Library.Bit_Operations". INCOMPATIBILITY. * Theory "HOL-Library.Word": Most operations on type word are set up for transfer and lifting. INCOMPATIBILITY. * Theory "HOL-Library.Word": Generic type conversions. INCOMPATIBILITY, sometimes additional rewrite rules must be added to applications to get a confluent system again. * Theory "HOL-Library.Word": Uniform polymorphic "mask" operation for both types int and word. INCOMPATIBILITY. * Theory "HOL-Library.Word": Syntax for signed compare operators has been consolidated with syntax of regular compare operators. Minor INCOMPATIBILITY. * Former session "HOL-Word": Various operations dealing with bit values represented as reversed lists of bools are separated into theory Reversed_Bit_Lists in session Word_Lib in the AFP. INCOMPATIBILITY. * Former session "HOL-Word": Theory "Word_Bitwise" has been moved to AFP entry Word_Lib as theory "Bitwise". INCOMPATIBILITY. * Former session "HOL-Word": Compound operation "bin_split" simplifies by default into its components "drop_bit" and "take_bit". INCOMPATIBILITY. * Former session "HOL-Word": Operations lsb, msb and set_bit are separated into theories Least_significant_bit, Most_significant_bit and Generic_set_bit respectively in session Word_Lib in the AFP. INCOMPATIBILITY. * Former session "HOL-Word": Ancient int numeral representation has been factored out in separate theory "Ancient_Numeral" in session Word_Lib in the AFP. INCOMPATIBILITY. * Former session "HOL-Word": Operations "bin_last", "bin_rest", "bin_nth", "bintrunc", "sbintrunc", "norm_sint", "bin_cat" and "max_word" are now mere input abbreviations. Minor INCOMPATIBILITY. * Former session "HOL-Word": Misc ancient material has been factored out into separate theories and moved to session Word_Lib in the AFP. See theory "Guide" there for further information. INCOMPATIBILITY. * Session HOL-TPTP: The "tptp_isabelle" and "tptp_sledgehammer" commands are in working order again, as opposed to outputting "GaveUp" on nearly all problems. * Session "HOL-Hoare": concrete syntax only for Hoare triples, not abstract language constructors. * Session "HOL-Hoare": now provides a total correctness logic as well. *** FOL *** * Added the "at most 1" quantifier, Uniq, as in HOL. * Simproc "defined_all" and rewrite rule "subst_all" have been changed as in HOL. *** ML *** * Antiquotations @{scala_function}, @{scala}, @{scala_thread} refer to registered Isabelle/Scala functions (of type String => String): invocation works via the PIDE protocol. 
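  A minimal usage sketch for these antiquotations, assuming the predefined
  Isabelle/Scala function "echo" (which simply returns its argument):

    val s = \<^scala>\<open>echo\<close> "hello";   (* = "hello", via the PIDE protocol *)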
* Path.append is available as overloaded "+" operator, similar to the corresponding Isabelle/Scala operation.

* ML statistics via an external Poly/ML process: this allows monitoring the runtime system while the ML program sleeps.


*** System ***

* Isabelle server allows user-defined commands via isabelle_scala_service.

* Update/rebuild external provers on currently supported OS platforms, notably CVC4 1.8, E prover 2.5, SPASS 3.8ds, CSDP 6.1.1.

* The command-line tool "isabelle log" prints prover messages from the build database of the given session, following the order of theory sources, instead of erratic parallel evaluation. Consequently, the session log file is restricted to system messages of the overall build process, and thus becomes more informative.

* Discontinued obsolete isabelle display tool, and DVI_VIEWER settings variable.

* The command-line tool "isabelle logo" only outputs PDF; obsolete EPS (for DVI documents) has been discontinued. Former option -n has been turned into -o with explicit file name. Minor INCOMPATIBILITY.

* The command-line tool "isabelle components" supports new options -u and -x to manage $ISABELLE_HOME_USER/etc/components without manual editing of Isabelle configuration files.

* The shell function "isabelle_directory" (within etc/settings of components) augments the list of special directories for persistent symbolic path names. This improves portability of heap images and session databases. It used to be hard-wired for Isabelle + AFP, but other projects may now participate on equal terms.

* The command-line tool "isabelle process" now prints output to stdout/stderr separately and incrementally, instead of just one bulk to stdout after termination. Potential INCOMPATIBILITY for external tools.

* The command-line tool "isabelle console" now supports interrupts properly (on Linux and macOS).

* Batch-builds via "isabelle build" use a PIDE session with special protocol: this allows to invoke Isabelle/Scala operations from Isabelle/ML. Big build jobs (e.g. AFP) require extra heap space for the java process, e.g. like this in $ISABELLE_HOME_USER/etc/settings:

  ISABELLE_TOOL_JAVA_OPTIONS="$ISABELLE_TOOL_JAVA_OPTIONS -Xmx8g"

  This includes full PIDE markup, if option "build_pide_reports" is enabled.

* The command-line tool "isabelle build" provides option -P DIR to produce PDF/HTML presentation in the specified directory; -P: refers to the standard directory according to ISABELLE_BROWSER_INFO / ISABELLE_BROWSER_INFO_SYSTEM settings. Generated PDF documents are taken from the build database -- from this or earlier builds with option document=pdf.

* The command-line tool "isabelle document" generates theory documents on the spot, using the underlying session build database (exported LaTeX sources or existing PDF files). INCOMPATIBILITY, the former "isabelle document" tool was rather different and has been discontinued.

* The command-line tool "isabelle sessions" explores the structure of Isabelle sessions and prints result names in topological order (on stdout).

* The Isabelle/Scala "Progress" interface changed slightly and "No_Progress" has been discontinued. INCOMPATIBILITY, use "new Progress" instead.

* General support for Isabelle/Scala system services, configured via the shell function "isabelle_scala_service" in etc/settings (e.g. of an Isabelle component); see implementations of class Isabelle_System.Service in Isabelle/Scala. This supersedes former "isabelle_scala_tools" and "isabelle_file_format": minor INCOMPATIBILITY.
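  For example, a component might register such a service in its own
  etc/settings like this (a sketch; the class name is hypothetical):

    # etc/settings of an add-on component:
    isabelle_scala_service "com.example.My_Service"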
* The syntax of theory load commands (for auxiliary files) is now specified in Isabelle/Scala, as instance of class isabelle.Command_Span.Load_Command registered via isabelle_scala_service in etc/settings. This allows more flexible schemes than just a list of file extensions. Minor INCOMPATIBILITY, e.g. see theory HOL-SPARK.SPARK_Setup to emulate the old behaviour.

* JVM system property "isabelle.laf" has been discontinued; the default Swing look-and-feel is "FlatLaf Light".

* Isabelle/Phabricator supports Ubuntu 20.04 LTS.

* Isabelle/Phabricator setup has been updated to follow ongoing development: libphutil has been discontinued. Minor INCOMPATIBILITY: existing server installations should remove libphutil from /usr/local/bin/isabelle-phabricator-upgrade and each installation root directory (e.g. /var/www/phabricator-vcs/libphutil).

* Experimental support for arm64-linux platform. The reference platform is Raspberry Pi 4 with 8 GB RAM running Pi OS (64 bit).

* Support for Apple Silicon, using mostly x86_64-darwin runtime translation via Rosetta 2 (e.g. Poly/ML and external provers), but also some native arm64-darwin executables (e.g. Java).



New in Isabelle2020 (April 2020)
--------------------------------

*** General ***

* Session ROOT files need to specify explicit 'directories' for import of theory files. Directories cannot be shared by different sessions. (Recall that import of theories from other sessions works via session-qualified theory names, together with suitable 'sessions' declarations in the ROOT.)

* Internal derivations record dependencies on oracles and other theorems accurately, including the implicit type-class reasoning wrt. proven class relations and type arities. In particular, the formal tagging with "Pure.skip_proofs" of results stemming from "instance ... sorry" is now propagated properly to theorems depending on such type instances.

* Command 'sorry' (oracle "Pure.skip_proofs") is more precise about the actual proposition that is assumed in the goal and proof context. This requires at least Proofterm.proofs = 1 to show up in theorem dependencies.

* Command 'thm_oracles' prints all oracles used in given theorems, covering the full graph of transitive dependencies.

* Command 'thm_deps' prints immediate theorem dependencies of the given facts. The former graph visualization has been discontinued, because it was hardly usable.

* Refined treatment of proof terms, including type-class proofs for minor object-logics (FOL, FOLP, Sequents).

* The inference kernel is now confined to one main module: structure Thm, without the former circular dependency on structure Axclass.

* Mixfix annotations may use "' " (single quote followed by space) to separate delimiters (as documented in the isar-ref manual), without requiring an auxiliary empty block. A literal single quote needs to be escaped properly. Minor INCOMPATIBILITY.


*** Isar ***

* The proof method combinator (subproofs m) applies the method expression m consecutively to each subgoal, constructing individual subproofs internally. This impacts the internal construction of proof terms: it makes a cascade of let-expressions within the derivation tree and may thus improve scalability.

* Attribute "trace_locales" activates tracing of locale instances during roundup. It replaces the diagnostic command 'print_dependencies', which has been discontinued.


*** Isabelle/jEdit Prover IDE ***

* Prover IDE startup is now much faster, because theory dependencies are no longer explored in advance.
The overall session structure with its declarations of 'directories' is sufficient to locate theory files. Thus the "session focus" of option "isabelle jedit -S" has become obsolete (likewise for "isabelle vscode_server -S"). Existing option "-R" is both sufficient and more convenient to start editing a particular session.

* Actions isabelle.tooltip (CS+b) and isabelle.message (CS+m) display tooltip message popups, corresponding to mouse hovering with/without the CONTROL/COMMAND key pressed.

* The following actions allow to navigate errors within the current document snapshot:

    isabelle.first-error (CS+a)
    isabelle.last-error (CS+z)
    isabelle.next-error (CS+n)
    isabelle.prev-error (CS+p)

* Support more brackets: \ \ (intended for implicit argument syntax).

* Action isabelle.jconsole (menu item Plugins / Isabelle / Java/VM Monitor) applies the jconsole tool on the running Isabelle/jEdit process. This allows to monitor resource usage etc.

* More adequate default font sizes for Linux on HD / UHD displays: automatic font scaling is usually absent on Linux, in contrast to Windows and macOS.

* The default value for the jEdit property "view.antiAlias" (menu item Utilities / Global Options / Text Area / Anti Aliased smooth text) is now "subpixel HRGB", instead of former "standard". Especially on Linux this often leads to faster text rendering, but can also cause problems with odd color shades. An alternative is to switch back to "standard" here, and set the following Java system property:

  isabelle jedit -Dsun.java2d.opengl=true

  This can be made persistent via JEDIT_JAVA_OPTIONS in $ISABELLE_HOME_USER/etc/settings. For the "Isabelle2020" desktop application there is a corresponding options file in the same directory.


*** Isabelle/VSCode Prover IDE ***

* Update of State and Preview panels to use new WebviewPanel API of VSCode.


*** HOL ***

* Improvements of the 'lift_bnf' command:
  - Add support for quotient types.
  - Generate transfer rules for the lifted map/set/rel/pred constants (theorems "._transfer_raw").

* Term_XML.Encode/Decode.term uses compact representation of Const "typargs" from the given declaration environment. This also makes more sense for translations to lambda-calculi with explicit polymorphism. INCOMPATIBILITY, use Term_XML.Encode/Decode.term_raw in special applications.

* ASCII membership syntax concerning big operators for infimum and supremum has been discontinued. INCOMPATIBILITY.

* Removed multiplicativity assumption from class "normalization_semidom". Introduced various new intermediate classes with the multiplicativity assumption; many theorem statements (especially involving GCD/LCM) had to be adapted. This allows for a more natural instantiation of the algebraic typeclasses for e.g. Gaussian integers. INCOMPATIBILITY.

* Clear distinction between types for bits (False / True) and Z2 (0 / 1): theory HOL-Library.Bit has been renamed accordingly. INCOMPATIBILITY.

* Dynamic facts "algebra_split_simps" and "field_split_simps" correspond to algebra_simps and field_simps but contain more aggressive rules potentially splitting goals; algebra_split_simps roughly replaces sign_simps and field_split_simps can be used instead of divide_simps. INCOMPATIBILITY.

* Theory HOL.Complete_Lattices: renamed Inf_Sup -> Inf_eq_Sup and Sup_Inf -> Sup_eq_Inf.

* Theory HOL-Library.Monad_Syntax: infix operation "bind" (\) now associates to the left, as is customary (see the example below).

* Theory HOL-Library.Ramsey: full finite Ramsey's theorem with multiple colours and arbitrary exponents.
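  For example, with theory "HOL-Library.Monad_Syntax" imported, left
  associativity of bind means that the following term is parsed as
  "(m \<bind> f) \<bind> g" (a sketch; the 'term' command merely parses and
  type-checks, and the type annotation resolves the ad-hoc overloaded bind
  to the option monad):

    term "(m :: nat option) \<bind> f \<bind> g"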
* Session HOL-Proofs: build faster thanks to better treatment of proof terms in Isabelle/Pure. * Session HOL-Word: bitwise NOT-operator has proper prefix syntax. Minor INCOMPATIBILITY. * Session HOL-Analysis: proof method "metric" implements a decision procedure for simple linear statements in metric spaces. * Session HOL-Complex_Analysis has been split off from HOL-Analysis. *** ML *** * Theory construction may be forked internally, the operation Theory.join_theory recovers a single result theory. See also the example in theory "HOL-ex.Join_Theory". * Antiquotation @{oracle_name} inlines a formally checked oracle name. * Minimal support for a soft-type system within the Isabelle logical framework (module Soft_Type_System). * Former Variable.auto_fixes has been replaced by slightly more general Proof_Context.augment: it is subject to an optional soft-type system of the underlying object-logic. Minor INCOMPATIBILITY. * More scalable Export.export using XML.tree to avoid premature string allocations, with convenient shortcut XML.blob. Minor INCOMPATIBILITY. * Prover IDE support for the underlying Poly/ML compiler (not the basis library). Open $ML_SOURCES/ROOT.ML in Isabelle/jEdit to browse the implementation with full markup. *** System *** * Standard rendering for more Isabelle symbols: \ \ \ \ * The command-line tool "isabelle scala_project" creates a Gradle project configuration for Isabelle/Scala/jEdit, to support Scala IDEs such as IntelliJ IDEA. * The command-line tool "isabelle phabricator_setup" facilitates self-hosting of the Phabricator software-development platform, with support for Git, Mercurial, Subversion repositories. This helps to avoid monoculture and to escape the gravity of centralized version control by Github and/or Bitbucket. For further documentation, see chapter "Phabricator server administration" in the "system" manual. A notable example installation is https://isabelle-dev.sketis.net/. * The command-line tool "isabelle hg_setup" simplifies the setup of Mercurial repositories, with hosting via Phabricator or SSH file server access. * The command-line tool "isabelle imports" has been discontinued: strict checking of session directories enforces session-qualified theory names in applications -- users are responsible to specify session ROOT entries properly. * The command-line tool "isabelle dump" and its underlying Isabelle/Scala module isabelle.Dump has become more scalable, by splitting sessions and supporting a base logic image. Minor INCOMPATIBILITY in options and parameters. * The command-line tool "isabelle build_docker" has been slightly improved: it is now properly documented in the "system" manual. * Isabelle/Scala support for the Linux platform (Ubuntu): packages, users, system services. * Isabelle/Scala support for proof terms (with full type/term information) in module isabelle.Term. * Isabelle/Scala: more scalable output of YXML files, e.g. relevant for "isabelle dump". * Theory export via Isabelle/Scala has been reworked. The former "fact" name space is now split into individual "thm" items: names are potentially indexed, such as "foo" for singleton facts, or "bar(1)", "bar(2)", "bar(3)" for multi-facts. Theorem dependencies are now exported as well: this spans an overall dependency graph of internal inferences; it might help to reconstruct the formal structure of theory libraries. See also the module isabelle.Export_Theory in Isabelle/Scala. 
* Theory export of structured specifications, based on internal declarations of Spec_Rules by packages like 'definition', 'inductive', 'primrec', 'function'.

* Old settings variables ISABELLE_PLATFORM and ISABELLE_WINDOWS_PLATFORM have been discontinued -- deprecated since Isabelle2018.

* More complete x86_64 platform support on macOS, notably Catalina where old x86 has been discontinued.

* Update to GHC stack 2.1.3 with stackage lts-13.19/ghc-8.6.4.

* Update to OCaml Opam 2.0.6 (using ocaml 4.05.0 as before).



New in Isabelle2019 (June 2019)
-------------------------------

*** General ***

* The font collection "Isabelle DejaVu" is systematically derived from the existing "DejaVu" fonts, with variants "Sans Mono", "Sans", "Serif" and styles "Normal", "Bold", "Italic/Oblique", "Bold-Italic/Oblique". The DejaVu base fonts are restricted to well-defined Unicode ranges and augmented by special Isabelle symbols, taken from the former "IsabelleText" font (which is no longer provided separately). The line metrics and overall rendering quality are closer to original DejaVu. INCOMPATIBILITY with display configuration expecting the old "IsabelleText" font: use e.g. "Isabelle DejaVu Sans Mono" instead.

* The Isabelle fonts render "\" properly as superscript "-1".

* Old-style inner comments (* ... *) within the term language are no longer supported (legacy feature in Isabelle2018).

* Old-style {* verbatim *} tokens are explicitly marked as legacy feature and will be removed soon. Use \cartouche\ syntax instead, e.g. via "isabelle update_cartouches -t" (available since Isabelle2015).

* Infix operators that begin or end with a "*" are now parenthesized without additional spaces, e.g. "(*)" instead of "( * )". Minor INCOMPATIBILITY.

* Mixfix annotations may use cartouches instead of old-style double quotes, e.g. (infixl \+\ 60). The command-line tool "isabelle update -u mixfix_cartouches" allows to update existing theory sources automatically.

* ML setup commands (e.g. 'setup', 'method_setup', 'parse_translation') need to provide a closed expression -- without trailing semicolon. Minor INCOMPATIBILITY.

* Commands 'generate_file', 'export_generated_files', and 'compile_generated_files' support a stateless (PIDE-conformant) model for generated sources and compiled binaries of other languages. The compilation process is managed in Isabelle/ML, and results exported to the session database for further use (e.g. with "isabelle export" or "isabelle build -e").


*** Isabelle/jEdit Prover IDE ***

* Fonts for the text area, gutter, GUI elements etc. use the "Isabelle DejaVu" collection by default, which provides uniform rendering quality with the usual Isabelle symbols. Line spacing no longer needs to be adjusted: properties for the old IsabelleText font had "Global Options / Text Area / Extra vertical line spacing (in pixels): -2", it now defaults to 1, but 0 works as well.

* The jEdit File Browser is more prominent in the default GUI layout of Isabelle/jEdit: various virtual file-systems provide access to Isabelle resources, notably via "favorites:" (or "Edit Favorites").

* Further markup and rendering for "plain text" (e.g. informal prose) and "raw text" (e.g. verbatim sources). This improves the visual appearance of formal comments inside the term language, or in general for repeated alternation of formal and informal text.

* Action "isabelle-export-browser" points the File Browser to the theory exports of the current buffer, based on the "isabelle-export:" virtual file-system.
The directory view needs to be reloaded manually to follow ongoing document processing. * Action "isabelle-session-browser" points the File Browser to session information, based on the "isabelle-session:" virtual file-system. Its entries are structured according to chapter / session names, the open operation is redirected to the session ROOT file. * Support for user-defined file-formats via class isabelle.File_Format in Isabelle/Scala (e.g. see isabelle.Bibtex.File_Format), configured via the shell function "isabelle_file_format" in etc/settings (e.g. of an Isabelle component). * System option "jedit_text_overview" allows to disable the text overview column. * Command-line options "-s" and "-u" of "isabelle jedit" override the default for system option "system_heaps" that determines the heap storage directory for "isabelle build". Option "-n" is now clearly separated from option "-s". * The Isabelle/jEdit desktop application uses the same options as "isabelle jedit" for its internal "isabelle build" process: the implicit option "-o system_heaps" (or "-s") has been discontinued. This reduces the potential for surprise wrt. command-line tools. * The official download of the Isabelle/jEdit application already contains heap images for Isabelle/HOL within its main directory: thus the first encounter becomes faster and more robust (e.g. when run from a read-only directory). * Isabelle DejaVu fonts are available with hinting by default, which is relevant for low-resolution displays. This may be disabled via system option "isabelle_fonts_hinted = false" in $ISABELLE_HOME_USER/etc/preferences -- it occasionally yields better results. * OpenJDK 11 has quite different font rendering, with better glyph shapes and improved sub-pixel anti-aliasing. In some situations results might be *worse* than Oracle Java 8, though -- a proper HiDPI / UHD display is recommended. * OpenJDK 11 supports GTK version 2.2 and 3 (according to system property jdk.gtk.version). The factory default is version 3, but ISABELLE_JAVA_SYSTEM_OPTIONS includes "-Djdk.gtk.version=2.2" to make this more conservative (as in Java 8). Depending on the GTK theme configuration, "-Djdk.gtk.version=3" might work better or worse. *** Document preparation *** * Document markers are formal comments of the form \<^marker>\marker_body\ that are stripped from document output: the effect is to modify the semantic presentation context or to emit markup to the PIDE document. Some predefined markers are taken from the Dublin Core Metadata Initiative, e.g. \<^marker>\contributor arg\ or \<^marker>\license arg\ and produce PIDE markup that can be retrieved from the document database. * Old-style command tags %name are re-interpreted as markers with proof-scope \<^marker>\tag (proof) name\ and produce LaTeX environments as before. Potential INCOMPATIBILITY: multiple markers are composed in canonical order, resulting in a reversed list of tags in the presentation context. * Marker \<^marker>\tag name\ does not apply to the proof of a top-level goal statement by default (e.g. 'theorem', 'lemma'). This is a subtle change of semantics wrt. old-style %name. * In Isabelle/jEdit, the string "\tag" may be completed to a "\<^marker>\tag \" template. * Document antiquotation option "cartouche" indicates if the output should be delimited as cartouche; this takes precedence over the analogous option "quotes". 
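For illustration, a hypothetical document snippet (the term is arbitrary):

  text \<open>The output of @{term [cartouche] "Suc 0"} is delimited as a
  cartouche, overriding the analogous option "quotes".\<close>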
* Many document antiquotations are internally categorized as "embedded" and expect one cartouche argument, which is typically used with the \<^control>\cartouche\ notation (e.g. \<^term>\\x y. x\). The cartouche delimiters are stripped in output of the source (antiquotation option "source"), but it is possible to enforce delimiters via option "source_cartouche", e.g. @{term [source_cartouche] \\x y. x\}. *** Isar *** * Implicit cases goal1, goal2, goal3, etc. have been discontinued (legacy feature since Isabelle2016). * More robust treatment of structural errors: begin/end blocks take precedence over goal/proof. This is particularly relevant for the headless PIDE session and server. * Command keywords of kind thy_decl / thy_goal may be more specifically fit into the traditional document model of "definition-statement-proof" via thy_defn / thy_stmt / thy_goal_defn / thy_goal_stmt. *** HOL *** * Command 'export_code' produces output as logical files within the theory context, as well as formal session exports that can be materialized via command-line tools "isabelle export" or "isabelle build -e" (with 'export_files' in the session ROOT). Isabelle/jEdit also provides a virtual file-system "isabelle-export:" that can be explored in the regular file-browser. A 'file_prefix' argument allows to specify an explicit name prefix for the target file (SML, OCaml, Scala) or directory (Haskell); the default is "export" with a consecutive number within each theory. * Command 'export_code': the 'file' argument is now legacy and will be removed soon: writing to the physical file-system is not well-defined in a reactive/parallel application like Isabelle. The empty 'file' argument has been discontinued already: it is superseded by the file-browser in Isabelle/jEdit on "isabelle-export:". Minor INCOMPATIBILITY. * Command 'code_reflect' no longer supports the 'file' argument: it has been superseded by 'file_prefix' for stateless file management as in 'export_code'. Minor INCOMPATIBILITY. * Code generation for OCaml: proper strings are used for literals. Minor INCOMPATIBILITY. * Code generation for OCaml: Zarith supersedes Nums as library for proper integer arithmetic. The library is located via standard invocations of "ocamlfind" (via ISABELLE_OCAMLFIND settings variable). The environment provided by "isabelle ocaml_setup" already contains this tool and the required packages. Minor INCOMPATIBILITY. * Code generation for Haskell: code includes for Haskell must contain proper module frame, nothing is added magically any longer. INCOMPATIBILITY. * Code generation: slightly more conventional syntax for 'code_stmts' antiquotation. Minor INCOMPATIBILITY. * Theory List: the precedence of the list_update operator has changed: "f a [n := x]" now needs to be written "(f a)[n := x]". * The functions \, \, \, \ (not the corresponding binding operators) now have the same precedence as any other prefix function symbol. Minor INCOMPATIBILITY. * Simplified syntax setup for big operators under image. In rare situations, type conversions are not inserted implicitly any longer and need to be given explicitly. Auxiliary abbreviations INFIMUM, SUPREMUM, UNION, INTER should now rarely occur in output and are just retained as migration auxiliary. Abbreviations MINIMUM and MAXIMUM are gone. INCOMPATIBILITY. * The simplifier uses image_cong_simp as a congruence rule. The historic and not really well-formed congruence rules INF_cong*, SUP_cong*, are not used by default any longer. INCOMPATIBILITY; consider using declare image_cong_simp [cong del] in extreme situations. * INF_image and SUP_image are no default simp rules any longer. INCOMPATIBILITY, prefer image_comp as simp rule if needed. * Strong congruence rules (with =simp=> in the premises) for constant f are now uniformly called f_cong_simp, in accordance with congruence rules produced for mappers by the datatype package. INCOMPATIBILITY. * Retired lemma card_Union_image; use the simpler card_UN_disjoint instead. INCOMPATIBILITY. * Facts sum_mset.commute and prod_mset.commute have been renamed to sum_mset.swap and prod_mset.swap, similarly to sum.swap and prod.swap. INCOMPATIBILITY. * ML structure Inductive: slightly more conventional naming schema. Minor INCOMPATIBILITY. * ML: Various _global variants of specification tools have been removed. Minor INCOMPATIBILITY, prefer combinators Named_Target.theory_map[_result] to lift specifications to the global theory level. * Theory HOL-Library.Simps_Case_Conv: 'case_of_simps' now supports overlapping and non-exhaustive patterns and handles arbitrarily nested patterns. It uses the same algorithm as HOL-Library.Code_Lazy, which assumes sequential left-to-right pattern matching. The generated equation no longer tuples the arguments on the right-hand side. INCOMPATIBILITY. * Theory HOL-Library.Multiset: the \# operator now has the same precedence as any other prefix function symbol. * Theory HOL-Library.Cardinal_Notations has been discontinued in favor of the bundle cardinal_syntax (available in theory Main). Minor INCOMPATIBILITY. * Session HOL-Library and HOL-Number_Theory: Exponentiation by squaring, used for computing powers in class "monoid_mult" and modular exponentiation. * Session HOL-Computational_Algebra: Formal Laurent series and overhaul of Formal power series. * Session HOL-Number_Theory: More material on residue rings in Carmichael's function, primitive roots, more properties for "ord". * Session HOL-Analysis: Better organization and much more material at the level of abstract topological spaces. * Session HOL-Algebra: Free abelian groups, etc., ported from HOL Light; algebraic closure of a field by de Vilhena and Baillon. * Session HOL-Homology has been added. It is a port of HOL Light's homology library, with new proofs of "invariance of domain" and related results. * Session HOL-SPARK: .prv files are no longer written to the file-system, but exported to the session database. Results may be retrieved via "isabelle build -e HOL-SPARK-Examples" on the command-line. * Sledgehammer: - The URL for SystemOnTPTP, which is used by remote provers, has been updated. - The machine-learning-based filter MaSh has been optimized to take less time (in most cases). * SMT: reconstruction is now possible using the SMT solver veriT. * Session HOL-Word: * New theory More_Word as comprehensive entrance point. * Merged type class bitss into type class bits. INCOMPATIBILITY. *** ML *** * Command 'generate_file' allows to produce sources for other languages, with antiquotations in the Isabelle context (only the control-cartouche form). The default "cartouche" antiquotation evaluates an ML expression of type string and inlines the result as a string literal of the target language.
For example, this works for Haskell as follows: generate_file "Pure.hs" = \ module Isabelle.Pure where allConst, impConst, eqConst :: String allConst = \\<^const_name>\Pure.all\\ impConst = \\<^const_name>\Pure.imp\\ eqConst = \\<^const_name>\Pure.eq\\ \ See also commands 'export_generated_files' and 'compile_generated_files' to use the results. * ML evaluation (notably via command 'ML' or 'ML_file') is subject to option ML_environment to select a named environment, such as "Isabelle" for Isabelle/ML, or "SML" for official Standard ML. * ML antiquotation @{master_dir} refers to the master directory of the underlying theory, i.e. the directory of the theory file. * ML antiquotation @{verbatim} inlines its argument as string literal, preserving newlines literally. The short form \<^verbatim>\abc\ is particularly useful. * Local_Theory.reset is no longer available in user space. Regular definitional packages should use balanced blocks of Local_Theory.open_target versus Local_Theory.close_target instead, or the Local_Theory.subtarget(_result) combinator. Rare INCOMPATIBILITY. * Original PolyML.pointerEq is retained as a convenience for tools that don't use Isabelle/ML (where this is called "pointer_eq"). *** System *** * Update to OpenJDK 11: the current long-term support version of Java. * Update to Poly/ML 5.8 allows to use the native x86_64 platform without the full overhead of 64-bit values everywhere. This special x86_64_32 mode provides up to 16GB ML heap, while program code and stacks are allocated elsewhere. Thus approx. 5 times more memory is available for applications compared to old x86 mode (which is no longer used by Isabelle). The switch to the x86_64 CPU architecture also avoids compatibility problems with Linux and macOS, where 32-bit applications are gradually phased out. * System option "checkpoint" has been discontinued: obsolete thanks to improved memory management in Poly/ML. * System option "system_heaps" determines where to store the session image of "isabelle build" (and other tools using that internally). Former option "-s" is superseded by option "-o system_heaps". INCOMPATIBILITY in command-line syntax. * Session directory $ISABELLE_HOME/src/Tools/Haskell provides some source modules for Isabelle tools implemented in Haskell, notably for Isabelle/PIDE. * The command-line tool "isabelle build -e" retrieves theory exports from the session build database, using 'export_files' in session ROOT entries. * The command-line tool "isabelle update" uses Isabelle/PIDE in batch-mode to update theory sources based on semantic markup produced in Isabelle/ML. Actual updates depend on system options that may be enabled via "-u OPT" (for "update_OPT"), see also $ISABELLE_HOME/etc/options section "Theory update". Theory sessions are specified as in "isabelle dump". * The command-line tool "isabelle update -u control_cartouches" changes antiquotations into control-symbol format (where possible): @{NAME} becomes \<^NAME> and @{NAME ARG} becomes \<^NAME>\ARG\. * Support for Isabelle command-line tools defined in Isabelle/Scala. Instances of class Isabelle_Scala_Tools may be configured via the shell function "isabelle_scala_tools" in etc/settings (e.g. of an Isabelle component). * Isabelle Server command "use_theories" supports "nodes_status_delay" for continuous output of node status information. The time interval is specified in seconds; a negative value means it is disabled (default). 
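A sketch of a corresponding server invocation (argument names follow the "system" manual; SESSION_ID stands for a value obtained from an earlier "session_start"):

  use_theories {"session_id": SESSION_ID, "theories": ["~~/src/HOL/ex/Seq"], "nodes_status_delay": 0.5}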
* Isabelle Server command "use_theories" terminates more robustly in the presence of structurally broken sources: full consolidation of theories is no longer required. * OCaml tools and libraries are now accesed via ISABELLE_OCAMLFIND, which needs to point to a suitable version of "ocamlfind" (e.g. via OPAM, see below). INCOMPATIBILITY: settings variables ISABELLE_OCAML and ISABELLE_OCAMLC are no longer supported. * Support for managed installations of Glasgow Haskell Compiler and OCaml via the following command-line tools: isabelle ghc_setup isabelle ghc_stack isabelle ocaml_setup isabelle ocaml_opam The global installation state is determined by the following settings (and corresponding directory contents): ISABELLE_STACK_ROOT ISABELLE_STACK_RESOLVER ISABELLE_GHC_VERSION ISABELLE_OPAM_ROOT ISABELLE_OCAML_VERSION After setup, the following Isabelle settings are automatically redirected (overriding existing user settings): ISABELLE_GHC ISABELLE_OCAMLFIND The old meaning of these settings as locally installed executables may be recovered by purging the directories ISABELLE_STACK_ROOT / ISABELLE_OPAM_ROOT, or by resetting these variables in $ISABELLE_HOME_USER/etc/settings. New in Isabelle2018 (August 2018) --------------------------------- *** General *** * Session-qualified theory names are mandatory: it is no longer possible to refer to unqualified theories from the parent session. INCOMPATIBILITY for old developments that have not been updated to Isabelle2017 yet (using the "isabelle imports" tool). * Only the most fundamental theory names are global, usually the entry points to major logic sessions: Pure, Main, Complex_Main, HOLCF, IFOL, FOL, ZF, ZFC etc. INCOMPATIBILITY, need to use qualified names for formerly global "HOL-Probability.Probability" and "HOL-SPARK.SPARK". * Global facts need to be closed: no free variables and no hypotheses. Rare INCOMPATIBILITY. * Facts stemming from locale interpretation are subject to lazy evaluation for improved performance. Rare INCOMPATIBILITY: errors stemming from interpretation morphisms might be deferred and thus difficult to locate; enable system option "strict_facts" temporarily to avoid this. * Marginal comments need to be written exclusively in the new-style form "\ \text\", old ASCII variants like "-- {* ... *}" are no longer supported. INCOMPATIBILITY, use the command-line tool "isabelle update_comments" to update existing theory files. * Old-style inner comments (* ... *) within the term language are legacy and will be discontinued soon: use formal comments "\ \...\" or "\<^cancel>\...\" instead. * The "op " syntax for infix operators has been replaced by "()". If begins or ends with a "*", there needs to be a space between the "*" and the corresponding parenthesis. INCOMPATIBILITY, use the command-line tool "isabelle update_op" to convert theory and ML files to the new syntax. Because it is based on regular expression matching, the result may need a bit of manual postprocessing. Invoking "isabelle update_op" converts all files in the current directory (recursively). In case you want to exclude conversion of ML files (because the tool frequently also converts ML's "op" syntax), use option "-m". * Theory header 'abbrevs' specifications need to be separated by 'and'. INCOMPATIBILITY. * Command 'external_file' declares the formal dependency on the given file name, such that the Isabelle build process knows about it, but without specific Prover IDE management. * Session ROOT entries no longer allow specification of 'files'. 
Rare INCOMPATIBILITY, use command 'external_file' within a proper theory context. * Session root directories may be specified multiple times: each accessible ROOT file is processed only once. This facilitates specification of $ISABELLE_HOME_USER/ROOTS or command-line options like -d or -D for "isabelle build" and "isabelle jedit". Example: isabelle build -D '~~/src/ZF' * The command 'display_drafts' has been discontinued. INCOMPATIBILITY, use action "isabelle.draft" (or "print") in Isabelle/jEdit instead. * In HTML output, the Isabelle symbol "\" is rendered as explicit Unicode hyphen U+2010, to avoid unclear meaning of the old "soft hyphen" U+00AD. Rare INCOMPATIBILITY, e.g. copy-paste of historic Isabelle HTML output. *** Isabelle/jEdit Prover IDE *** * The command-line tool "isabelle jedit" provides more flexible options for session management: - option -R builds an auxiliary logic image with all theories from other sessions that are not already present in its parent - option -S is like -R, with a focus on the selected session and its descendants (this reduces startup time for big projects like AFP) - option -A specifies an alternative ancestor session for options -R and -S - option -i includes additional sessions into the name-space of theories Examples: isabelle jedit -R HOL-Number_Theory isabelle jedit -R HOL-Number_Theory -A HOL isabelle jedit -d '$AFP' -S Formal_SSA -A HOL isabelle jedit -d '$AFP' -S Formal_SSA -A HOL-Analysis isabelle jedit -d '$AFP' -S Formal_SSA -A HOL-Analysis -i CryptHOL * PIDE markup for session ROOT files: allows to complete session names, follow links to theories and document files etc. * Completion supports theory header imports, using theory base name. E.g. "Prob" may be completed to "HOL-Probability.Probability". * Named control symbols (without special Unicode rendering) are shown as bold-italic keyword. This is particularly useful for the short form of antiquotations with control symbol: \<^name>\argument\. The action "isabelle.antiquoted_cartouche" turns an antiquotation with 0 or 1 arguments into this format. * Completion provides templates for named symbols with arguments, e.g. "\ \ARGUMENT\" or "\<^emph>\ARGUMENT\". * Slightly more parallel checking, notably for high priority print functions (e.g. State output). * The view title is set dynamically, according to the Isabelle distribution and the logic session name. The user can override this via set-view-title (stored persistently in $JEDIT_SETTINGS/perspective.xml). * System options "spell_checker_include" and "spell_checker_exclude" supersede former "spell_checker_elements" to determine regions of text that are subject to spell-checking. Minor INCOMPATIBILITY. * Action "isabelle.preview" is able to present more file formats, notably bibtex database files and ML files. * Action "isabelle.draft" is similar to "isabelle.preview", but shows a plain-text document draft. Both are available via the menu "Plugins / Isabelle". * When loading text files, the Isabelle symbols encoding UTF-8-Isabelle is only used if there is no conflict with existing Unicode sequences in the file. Otherwise, the fallback encoding is plain UTF-8 and Isabelle symbols remain in literal \ form. This avoids accidental loss of Unicode content when saving the file. * Bibtex database files (.bib) are semantically checked. * Update to jedit-5.5.0, the latest release. *** Isabelle/VSCode Prover IDE *** * HTML preview of theories and other file-formats similar to Isabelle/jEdit. 
* Command-line tool "isabelle vscode_server" accepts the same options -A, -R, -S, -i for session selection as "isabelle jedit". This is relevant for isabelle.args configuration settings in VSCode. The former option -A (explore all known session files) has been discontinued: it is enabled by default, unless option -S is used to focus on a particular spot in the session structure. INCOMPATIBILITY. *** Document preparation *** * Formal comments work uniformly in outer syntax, inner syntax (term language), Isabelle/ML and some other embedded languages of Isabelle. See also "Document comments" in the isar-ref manual. The following forms are supported: - marginal text comment: \ \\\ - canceled source: \<^cancel>\\\ - raw LaTeX: \<^latex>\\\ * Outside of the inner theory body, the default presentation context is theory Pure. Thus elementary antiquotations may be used in markup commands (e.g. 'chapter', 'section', 'text') and formal comments. * System option "document_tags" specifies alternative command tags. This is occasionally useful to control the global visibility of commands via session options (e.g. in ROOT). * Document markup commands ('section', 'text' etc.) are implicitly tagged as "document" and visible by default. This avoids the application of option "document_tags" to these commands. * Isabelle names are mangled into LaTeX macro names to allow the full identifier syntax with underscore, prime, digits. This is relevant for antiquotations in control symbol notation, e.g. \<^const_name> becomes \isactrlconstUNDERSCOREname. * Document preparation with skip_proofs option now preserves the content more accurately: only terminal proof steps ('by' etc.) are skipped. * Document antiquotation @{theory name} requires the long session-qualified theory name: this is what users reading the text normally need to import. * Document antiquotation @{session name} checks and prints the given session name verbatim. * Document antiquotation @{cite} now checks the given Bibtex entries against the Bibtex database files -- only in batch-mode session builds. * Command-line tool "isabelle document" has been re-implemented in Isabelle/Scala, with simplified arguments and explicit errors from the latex and bibtex process. Minor INCOMPATIBILITY. * Session ROOT entry: empty 'document_files' means there is no document for this session. There is no need to specify options [document = false] anymore. *** Isar *** * Command 'interpret' no longer exposes resulting theorems as literal facts, notably for the \prop\ notation or the "fact" proof method. This improves modularity of proofs and scalability of locale interpretation. Rare INCOMPATIBILITY, need to refer to explicitly named facts instead (e.g. use 'find_theorems' or 'try' to figure this out). * The old 'def' command has been discontinued (legacy since Isbelle2016-1). INCOMPATIBILITY, use 'define' instead -- usually with object-logic equality or equivalence. *** Pure *** * The inner syntax category "sort" now includes notation "_" for the dummy sort: it is effectively ignored in type-inference. * Rewrites clauses (keyword 'rewrites') were moved into the locale expression syntax, where they are part of locale instances. In interpretation commands rewrites clauses now need to occur before 'for' and 'defines'. Rare INCOMPATIBILITY; definitions immediately subject to rewriting may need to be pulled up into the surrounding theory. * For 'rewrites' clauses, if activating a locale instance fails, fall back to reading the clause first. 
This helps avoid qualification of locale instances where the qualifier's sole purpose is avoiding duplicate constant declarations. * Proof method "simp" now supports a new modifier "flip:" followed by a list of theorems. Each of these theorems is removed from the simpset (without warning if it is not there) and the symmetric version of the theorem (i.e. lhs and rhs exchanged) is added to the simpset. For "auto" and friends the modifier is "simp flip:". *** HOL *** * Sledgehammer: bundled version of "vampire" (for non-commercial users) helps to avoid fragility of "remote_vampire" service. * Clarified relationship of characters, strings and code generation: - Type "char" is now a proper datatype of 8-bit values. - Conversions "nat_of_char" and "char_of_nat" are gone; use more general conversions "of_char" and "char_of" with suitable type constraints instead. - The zero character is just written "CHR 0x00", not "0" any longer. - Type "String.literal" (for code generation) is now isomorphic to lists of 7-bit (ASCII) values; concrete values can be written as "STR ''...''" for sequences of printable characters and "STR 0x..." for one single ASCII code point given as hexadecimal numeral. - Type "String.literal" supports concatenation "... + ..." for all standard target languages. - Theory HOL-Library.Code_Char is gone; study the explanations concerning "String.literal" in the tutorial on code generation to get an idea how target-language string literals can be converted to HOL string values and vice versa. - Session Imperative-HOL: operation "raise" directly takes a value of type "String.literal" as argument, not type "string". INCOMPATIBILITY. * Code generation: Code generation takes an explicit option "case_insensitive" to accomodate case-insensitive file systems. * Abstract bit operations as part of Main: push_bit, take_bit, drop_bit. * New, more general, axiomatization of complete_distrib_lattice. The former axioms: "sup x (Inf X) = Inf (sup x ` X)" and "inf x (Sup X) = Sup (inf x ` X)" are replaced by: "Inf (Sup ` A) <= Sup (Inf ` {f ` A | f . (! Y \ A . f Y \ Y)})" The instantiations of sets and functions as complete_distrib_lattice are moved to Hilbert_Choice.thy because their proofs need the Hilbert choice operator. The dual of this property is also proved in theory HOL.Hilbert_Choice. * New syntax for the minimum/maximum of a function over a finite set: MIN x\A. B and even MIN x. B (only useful for finite types), also MAX. * Clarifed theorem names: Min.antimono ~> Min.subset_imp Max.antimono ~> Max.subset_imp Minor INCOMPATIBILITY. * SMT module: - The 'smt_oracle' option is now necessary when using the 'smt' method with a solver other than Z3. INCOMPATIBILITY. - The encoding to first-order logic is now more complete in the presence of higher-order quantifiers. An 'smt_explicit_application' option has been added to control this. INCOMPATIBILITY. * Facts sum.commute(_restrict) and prod.commute(_restrict) renamed to sum.swap(_restrict) and prod.swap(_restrict), to avoid name clashes on interpretation of abstract locales. INCOMPATIBILITY. * Predicate coprime is now a real definition, not a mere abbreviation. INCOMPATIBILITY. * Predicate pairwise_coprime abolished, use "pairwise coprime" instead. INCOMPATIBILITY. * The relator rel_filter on filters has been strengthened to its canonical categorical definition with better properties. INCOMPATIBILITY. * Generalized linear algebra involving linear, span, dependent, dim from type class real_vector to locales module and vector_space. 
Renamed: span_inc ~> span_superset span_superset ~> span_base span_eq ~> span_eq_iff INCOMPATIBILITY. * Class linordered_semiring_1 covers zero_less_one also, ruling out pathologic instances. Minor INCOMPATIBILITY. * Theory HOL.List: functions "sorted_wrt" and "sorted" now compare every element in a list to all following elements, not just the next one. * Theory HOL.List syntax: - filter-syntax "[x <- xs. P]" is no longer output syntax, but only input syntax - list comprehension syntax now supports tuple patterns in "pat <- xs" * Theory Map: "empty" must now be qualified as "Map.empty". * Removed nat-int transfer machinery. Rare INCOMPATIBILITY. * Fact mod_mult_self4 (on nat) renamed to Suc_mod_mult_self3, to avoid clash with fact mod_mult_self4 (on more generic semirings). INCOMPATIBILITY. * Eliminated some theorem aliasses: even_times_iff ~> even_mult_iff mod_2_not_eq_zero_eq_one_nat ~> not_mod_2_eq_0_eq_1 even_of_nat ~> even_int_iff INCOMPATIBILITY. * Eliminated some theorem duplicate variations: - dvd_eq_mod_eq_0_numeral can be replaced by dvd_eq_mod_eq_0 - mod_Suc_eq_Suc_mod can be replaced by mod_Suc - mod_Suc_eq_Suc_mod [symmetrict] can be replaced by mod_simps - mod_eq_0_iff can be replaced by mod_eq_0_iff_dvd and dvd_def - the witness of mod_eqD can be given directly as "_ div _" INCOMPATIBILITY. * Classical setup: Assumption "m mod d = 0" (for m d :: nat) is no longer aggresively destroyed to "\q. m = d * q". INCOMPATIBILITY, adding "elim!: dvd" to classical proof methods in most situations restores broken proofs. * Theory HOL-Library.Conditional_Parametricity provides command 'parametric_constant' for proving parametricity of non-recursive definitions. For constants that are not fully parametric the command will infer conditions on relations (e.g., bi_unique, bi_total, or type class conditions such as "respects 0") sufficient for parametricity. See theory HOL-ex.Conditional_Parametricity_Examples for some examples. * Theory HOL-Library.Code_Lazy provides a new preprocessor for the code generator to generate code for algebraic types with lazy evaluation semantics even in call-by-value target languages. See the theories HOL-ex.Code_Lazy_Demo and HOL-Codegenerator_Test.Code_Lazy_Test for some examples. * Theory HOL-Library.Landau_Symbols has been moved here from AFP. * Theory HOL-Library.Old_Datatype no longer provides the legacy command 'old_datatype'. INCOMPATIBILITY. * Theory HOL-Computational_Algebra.Polynomial_Factorial does not provide instances of rat, real, complex as factorial rings etc. Import HOL-Computational_Algebra.Field_as_Ring explicitly in case of need. INCOMPATIBILITY. * Session HOL-Algebra: renamed (^) to [^] to avoid conflict with new infix/prefix notation. * Session HOL-Algebra: revamped with much new material. The set of isomorphisms between two groups is now denoted iso rather than iso_set. INCOMPATIBILITY. * Session HOL-Analysis: the Arg function now respects the same interval as Ln, namely (-pi,pi]; the old Arg function has been renamed Arg2pi. INCOMPATIBILITY. * Session HOL-Analysis: the functions zorder, zer_poly, porder and pol_poly have been redefined. All related lemmas have been reworked. INCOMPATIBILITY. * Session HOL-Analysis: infinite products, Moebius functions, the Riemann mapping theorem, the Vitali covering theorem, change-of-variables results for integration and measures. * Session HOL-Real_Asymp: proof method "real_asymp" proves asymptotics or real-valued functions (limits, "Big-O", etc.) automatically. 
See also ~~/src/HOL/Real_Asymp/Manual for some documentation. * Session HOL-Types_To_Sets: more tool support (unoverload_type combines internalize_sorts and unoverload) and larger experimental application (type based linear algebra transferred to linear algebra on subspaces). *** ML *** * Operation Export.export emits theory exports (arbitrary blobs), which are stored persistently in the session build database. * Command 'ML_export' exports ML toplevel bindings to the global bootstrap environment of the ML process. This allows ML evaluation without a formal theory context, e.g. in command-line tools like "isabelle process". *** System *** * Mac OS X 10.10 Yosemite is now the baseline version; Mavericks is no longer supported. * Linux and Windows/Cygwin are for x86_64 only, old 32bit platform support has been discontinued. * Java runtime is for x86_64 only. Corresponding Isabelle settings have been renamed to ISABELLE_TOOL_JAVA_OPTIONS and JEDIT_JAVA_OPTIONS, instead of former 32/64 variants. INCOMPATIBILITY. * Old settings ISABELLE_PLATFORM and ISABELLE_WINDOWS_PLATFORM should be phased out due to unclear preference of 32bit vs. 64bit architecture. Explicit GNU bash expressions are now preferred, for example (with quotes): #Posix executables (Unix or Cygwin), with preference for 64bit "${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM32}" #native Windows or Unix executables, with preference for 64bit "${ISABELLE_WINDOWS_PLATFORM64:-${ISABELLE_WINDOWS_PLATFORM32:-${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM32}}}" #native Windows (32bit) or Unix executables (preference for 64bit) "${ISABELLE_WINDOWS_PLATFORM32:-${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM32}}" * Command-line tool "isabelle build" supports new options: - option -B NAME: include session NAME and all descendants - option -S: only observe changes of sources, not heap images - option -f: forces a fresh build * Command-line tool "isabelle build" options -c -x -B refer to descendants wrt. the session parent or import graph. Subtle INCOMPATIBILITY: options -c -x used to refer to the session parent graph only. * Command-line tool "isabelle build" takes "condition" options with the corresponding environment values into account, when determining the up-to-date status of a session. * The command-line tool "dump" dumps information from the cumulative PIDE session database: many sessions may be loaded into a given logic image, results from all loaded theories are written to the output directory. * Command-line tool "isabelle imports -I" also reports actual session imports. This helps to minimize the session dependency graph. * The command-line tool "export" and 'export_files' in session ROOT entries retrieve theory exports from the session build database. * The command-line tools "isabelle server" and "isabelle client" provide access to the Isabelle Server: it supports responsive session management and concurrent use of theories, based on Isabelle/PIDE infrastructure. See also the "system" manual. * The command-line tool "isabelle update_comments" normalizes formal comments in outer syntax as follows: \ \text\ (with a single space to approximate the appearance in document output). This is more specific than former "isabelle update_cartouches -c": the latter tool option has been discontinued. * The command-line tool "isabelle mkroot" now always produces a document outline: its options have been adapted accordingly. INCOMPATIBILITY. * The command-line tool "isabelle mkroot -I" initializes a Mercurial repository for the generated session files.
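For example (a sketch; the directory name is arbitrary):

  isabelle mkroot -I Test_Project
  isabelle build -D Test_Project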
* Settings ISABELLE_HEAPS + ISABELLE_BROWSER_INFO (or ISABELLE_HEAPS_SYSTEM + ISABELLE_BROWSER_INFO_SYSTEM in "system build mode") determine the directory locations of the main build artefacts -- instead of hard-wired directories in ISABELLE_HOME_USER (or ISABELLE_HOME). * Settings ISABELLE_PATH and ISABELLE_OUTPUT have been discontinued: heap images and session databases are always stored in $ISABELLE_HEAPS/$ML_IDENTIFIER (command-line default) or $ISABELLE_HEAPS_SYSTEM/$ML_IDENTIFIER (main Isabelle application or "isabelle jedit -s" or "isabelle build -s"). * ISABELLE_LATEX and ISABELLE_PDFLATEX now include platform-specific options for improved error reporting. Potential INCOMPATIBILITY with unusual LaTeX installations, may have to adapt these settings. * Update to Poly/ML 5.7.1 with slightly improved performance and PIDE markup for identifier bindings. It now uses the GNU Multiple Precision Arithmetic Library (libgmp) on all platforms, notably Mac OS X with 32/64 bit. New in Isabelle2017 (October 2017) ---------------------------------- *** General *** * Experimental support for Visual Studio Code (VSCode) as alternative Isabelle/PIDE front-end, see also https://marketplace.visualstudio.com/items?itemName=makarius.Isabelle2017 VSCode is a new type of application that continues the concepts of "programmer's editor" and "integrated development environment" towards fully semantic editing and debugging -- in a relatively light-weight manner. Thus it fits nicely on top of the Isabelle/PIDE infrastructure. Technically, VSCode is based on the Electron application framework (Node.js + Chromium browser + V8), which is implemented in JavaScript and TypeScript, while Isabelle/VSCode mainly consists of Isabelle/Scala modules around a Language Server implementation. * Theory names are qualified by the session name that they belong to. This affects imports, but not the theory name space prefix (which is just the theory base name as before). In order to import theories from other sessions, the ROOT file format provides a new 'sessions' keyword. In contrast, a theory that is imported in the old-fashioned manner via an explicit file-system path belongs to the current session, and might cause theory name conflicts later on. Theories that are imported from other sessions are excluded from the current session document. The command-line tool "isabelle imports" helps to update theory imports. * The main theory entry points for some non-HOL sessions have changed, to avoid confusion with the global name "Main" of the session HOL. This leads to the following renamings: CTT/Main.thy ~> CTT/CTT.thy ZF/Main.thy ~> ZF/ZF.thy ZF/Main_ZF.thy ~> ZF/ZF.thy ZF/Main_ZFC.thy ~> ZF/ZFC.thy ZF/ZF.thy ~> ZF/ZF_Base.thy INCOMPATIBILITY. * Commands 'alias' and 'type_alias' introduce aliases for constants and type constructors, respectively. This allows adhoc changes to name-space accesses within global or local theory contexts, e.g. within a 'bundle'. * Document antiquotations @{prf} and @{full_prf} output proof terms (again) in the same way as commands 'prf' and 'full_prf'. * Computations generated by the code generator can be embedded directly into ML, alongside @{code} antiquotations, using the following antiquotations: @{computation ... terms: ... datatypes: ...} : ((term -> term) -> 'ml option -> 'a) -> Proof.context -> term -> 'a @{computation_conv ... terms: ... datatypes: ...} : (Proof.context -> 'ml -> conv) -> Proof.context -> conv @{computation_check terms: ...
datatypes: ...} : Proof.context -> conv See src/HOL/ex/Computations.thy, src/HOL/Decision_Procs/Commutative_Ring.thy and src/HOL/Decision_Procs/Reflective_Field.thy for examples and the tutorial on code generation. *** Prover IDE -- Isabelle/Scala/jEdit *** * Session-qualified theory imports allow the Prover IDE to process arbitrary theory hierarchies independently of the underlying logic session image (e.g. option "isabelle jedit -l"), but the directory structure needs to be known in advance (e.g. option "isabelle jedit -d" or a line in the file $ISABELLE_HOME_USER/ROOTS). * The PIDE document model maintains file content independently of the status of jEdit editor buffers. Reloading jEdit buffers no longer causes changes of formal document content. Theory dependencies are always resolved internally, without the need for corresponding editor buffers. The system option "jedit_auto_load" has been discontinued: it is effectively always enabled. * The Theories dockable provides a "Purge" button, in order to restrict the document model to theories that are required for open editor buffers. * The Theories dockable indicates the overall status of checking of each entry. When all forked tasks of a theory are finished, the border is painted with thick lines; remaining errors in this situation are represented by a different border color. * Automatic indentation is more careful to avoid redundant spaces in intermediate situations. Keywords are indented after input (via typed characters or completion); see also option "jedit_indent_input". * Action "isabelle.preview" opens an HTML preview of the current theory document in the default web browser. * Command-line invocation "isabelle jedit -R -l LOGIC" opens the ROOT entry of the specified logic session in the editor, while its parent is used for formal checking. * The main Isabelle/jEdit plugin may be restarted manually (using the jEdit Plugin Manager), as long as the "Isabelle Base" plugin remains enabled at all times. * Update to current jedit-5.4.0. *** Pure *** * Deleting the last code equations for a particular function using [code del] results in function with no equations (runtime abort) rather than an unimplemented function (generation time abort). Use explicit [[code drop:]] to enforce the latter. Minor INCOMPATIBILITY. * Proper concept of code declarations in code.ML: - Regular code declarations act only on the global theory level, being ignored with warnings if syntactically malformed. - Explicitly global code declarations yield errors if syntactically malformed. - Default code declarations are silently ignored if syntactically malformed. Minor INCOMPATIBILITY. * Clarified and standardized internal data bookkeeping of code declarations: history of serials allows to track potentially non-monotonous declarations appropriately. Minor INCOMPATIBILITY. *** HOL *** * The Nunchaku model finder is now part of "Main". * SMT module: - A new option, 'smt_nat_as_int', has been added to translate 'nat' to 'int' and benefit from the SMT solver's theory reasoning. It is disabled by default. - The legacy module "src/HOL/Library/Old_SMT.thy" has been removed. - Several small issues have been rectified in the 'smt' command. * (Co)datatype package: The 'size_gen_o_map' lemma is no longer generated for datatypes with type class annotations. As a result, the tactic that derives it no longer fails on nested datatypes. Slight INCOMPATIBILITY. 
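Relating to the new 'smt_nat_as_int' option above, a minimal sketch (whether the goal is actually solved depends on the configured SMT solver):

  declare [[smt_nat_as_int]]

  lemma "(a::nat) + b = b + a"
    by smt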
* Command and antiquotation "value" with modified default strategy: terms without free variables are always evaluated using plain evaluation only, with no fallback on normalization by evaluation. Minor INCOMPATIBILITY. * Theories "GCD" and "Binomial" are already included in "Main" (instead of "Complex_Main"). * Constant "surj" is a full input/output abbreviation (again). Minor INCOMPATIBILITY. * Dropped aliasses RangeP, DomainP for Rangep, Domainp respectively. INCOMPATIBILITY. * Renamed ii to imaginary_unit in order to free up ii as a variable name. The syntax \ remains available. INCOMPATIBILITY. * Dropped abbreviations transP, antisymP, single_valuedP; use constants transp, antisymp, single_valuedp instead. INCOMPATIBILITY. * Constant "subseq" in Topological_Spaces has been removed -- it is subsumed by "strict_mono". Some basic lemmas specific to "subseq" have been renamed accordingly, e.g. "subseq_o" -> "strict_mono_o" etc. * Theory List: "sublist" renamed to "nths" in analogy with "nth", and "sublisteq" renamed to "subseq". Minor INCOMPATIBILITY. * Theory List: new generic function "sorted_wrt". * Named theorems mod_simps covers various congruence rules concerning mod, replacing former zmod_simps. INCOMPATIBILITY. * Swapped orientation of congruence rules mod_add_left_eq, mod_add_right_eq, mod_add_eq, mod_mult_left_eq, mod_mult_right_eq, mod_mult_eq, mod_minus_eq, mod_diff_left_eq, mod_diff_right_eq, mod_diff_eq. INCOMPATIBILITY. * Generalized some facts: measure_induct_rule measure_induct zminus_zmod ~> mod_minus_eq zdiff_zmod_left ~> mod_diff_left_eq zdiff_zmod_right ~> mod_diff_right_eq zmod_eq_dvd_iff ~> mod_eq_dvd_iff INCOMPATIBILITY. * Algebraic type class hierarchy of euclidean (semi)rings in HOL: euclidean_(semi)ring, euclidean_(semi)ring_cancel, unique_euclidean_(semi)ring; instantiation requires provision of a euclidean size. * Theory "HOL-Number_Theory.Euclidean_Algorithm" has been reworked: - Euclidean induction is available as rule eucl_induct. - Constants Euclidean_Algorithm.gcd, Euclidean_Algorithm.lcm, Euclidean_Algorithm.Gcd and Euclidean_Algorithm.Lcm allow easy instantiation of euclidean (semi)rings as GCD (semi)rings. - Coefficients obtained by extended euclidean algorithm are available as "bezout_coefficients". INCOMPATIBILITY. * Theory "Number_Theory.Totient" introduces basic notions about Euler's totient function previously hidden as solitary example in theory Residues. Definition changed so that "totient 1 = 1" in agreement with the literature. Minor INCOMPATIBILITY. * New styles in theory "HOL-Library.LaTeXsugar": - "dummy_pats" for printing equations with "_" on the lhs; - "eta_expand" for printing eta-expanded terms. * Theory "HOL-Library.Permutations": theorem bij_swap_ompose_bij has been renamed to bij_swap_compose_bij. INCOMPATIBILITY. * New theory "HOL-Library.Going_To_Filter" providing the "f going_to F" filter for describing points x such that f(x) is in the filter F. * Theory "HOL-Library.Formal_Power_Series": constants X/E/L/F have been renamed to fps_X/fps_exp/fps_ln/fps_hypergeo to avoid polluting the name space. INCOMPATIBILITY. * Theory "HOL-Library.FinFun" has been moved to AFP (again). INCOMPATIBILITY. * Theory "HOL-Library.FuncSet": some old and rarely used ASCII replacement syntax has been removed. INCOMPATIBILITY, standard syntax with symbols should be used instead. The subsequent commands help to reproduce the old forms, e.g. 
to simplify porting old theories: syntax (ASCII) "_PiE" :: "pttrn \ 'a set \ 'b set \ ('a \ 'b) set" ("(3PIE _:_./ _)" 10) "_Pi" :: "pttrn \ 'a set \ 'b set \ ('a \ 'b) set" ("(3PI _:_./ _)" 10) "_lam" :: "pttrn \ 'a set \ 'a \ 'b \ ('a \ 'b)" ("(3%_:_./ _)" [0,0,3] 3) * Theory "HOL-Library.Multiset": the simprocs on subset operators of multisets have been renamed: msetless_cancel_numerals ~> msetsubset_cancel msetle_cancel_numerals ~> msetsubset_eq_cancel INCOMPATIBILITY. * Theory "HOL-Library.Pattern_Aliases" provides input and output syntax for pattern aliases as known from Haskell, Scala and ML. * Theory "HOL-Library.Uprod" formalizes the type of unordered pairs. * Session HOL-Analysis: more material involving arcs, paths, covering spaces, innessential maps, retracts, infinite products, simplicial complexes. Baire Category theorem. Major results include the Jordan Curve Theorem and the Great Picard Theorem. * Session HOL-Algebra has been extended by additional lattice theory: the Knaster-Tarski fixed point theorem and Galois Connections. * Sessions HOL-Computational_Algebra and HOL-Number_Theory: new notions of squarefreeness, n-th powers, and prime powers. * Session "HOL-Computational_Algebra" covers many previously scattered theories, notably Euclidean_Algorithm, Factorial_Ring, Formal_Power_Series, Fraction_Field, Fundamental_Theorem_Algebra, Normalized_Fraction, Polynomial_FPS, Polynomial, Primes. Minor INCOMPATIBILITY. *** System *** * Isabelle/Scala: the SQL module supports access to relational databases, either as plain file (SQLite) or full-scale server (PostgreSQL via local port or remote ssh connection). * Results of "isabelle build" are recorded as SQLite database (i.e. "Application File Format" in the sense of https://www.sqlite.org/appfileformat.html). This allows systematic access via operations from module Sessions.Store in Isabelle/Scala. * System option "parallel_proofs" is 1 by default (instead of more aggressive 2). This requires less heap space and avoids burning parallel CPU cycles, while full subproof parallelization is enabled for repeated builds (according to parallel_subproofs_threshold). * System option "record_proofs" allows to change the global Proofterm.proofs variable for a session. Regular values are 0, 1, 2; a negative value means the current state in the ML heap image remains unchanged. * Isabelle settings variable ISABELLE_SCALA_BUILD_OPTIONS has been renamed to ISABELLE_SCALAC_OPTIONS. Rare INCOMPATIBILITY. * Isabelle settings variables ISABELLE_WINDOWS_PLATFORM, ISABELLE_WINDOWS_PLATFORM32, ISABELLE_WINDOWS_PLATFORM64 indicate the native Windows platform (independently of the Cygwin installation). This is analogous to ISABELLE_PLATFORM, ISABELLE_PLATFORM32, ISABELLE_PLATFORM64. * Command-line tool "isabelle build_docker" builds a Docker image from the Isabelle application bundle for Linux. See also https://hub.docker.com/r/makarius/isabelle * Command-line tool "isabelle vscode_server" provides a Language Server Protocol implementation, e.g. for the Visual Studio Code editor. It serves as example for alternative PIDE front-ends. * Command-line tool "isabelle imports" helps to maintain theory imports wrt. session structure.
Examples for the main Isabelle distribution: isabelle imports -I -a isabelle imports -U -a isabelle imports -U -i -a isabelle imports -M -a -d '~~/src/Benchmarks' New in Isabelle2016-1 (December 2016) ------------------------------------- *** General *** * Splitter in proof methods "simp", "auto" and friends: - The syntax "split add" has been discontinued, use plain "split", INCOMPATIBILITY. - For situations with many conditional or case expressions, there is an alternative splitting strategy that can be much faster. It is selected by writing "split!" instead of "split". It applies safe introduction and elimination rules after each split rule. As a result the subgoal may be split into several subgoals. * Command 'bundle' provides a local theory target to define a bundle from the body of specification commands (such as 'declare', 'declaration', 'notation', 'lemmas', 'lemma'). For example: bundle foo begin declare a [simp] declare b [intro] end * Command 'unbundle' is like 'include', but works within a local theory context. Unlike "context includes ... begin", the effect of 'unbundle' on the target context persists, until different declarations are given (see also the sketch below). * Simplified outer syntax: uniform category "name" includes long identifiers. Former "xname" / "nameref" / "name reference" has been discontinued. * Embedded content (e.g. the inner syntax of types, terms, props) may be delimited uniformly via cartouches. This works better than old-fashioned quotes when sub-languages are nested. * Mixfix annotations support general block properties, with syntax "(\x=a y=b z \\". Notable property names are "indent", "consistent", "unbreakable", "markup". The existing notation "(DIGITS" is equivalent to "(\indent=DIGITS\". The former notation "(00" for unbreakable blocks is superseded by "(\unbreakable\" --- rare INCOMPATIBILITY. * Proof method "blast" is more robust wrt. corner cases of Pure statements without object-logic judgment. * Commands 'prf' and 'full_prf' are somewhat more informative (again): proof terms are reconstructed and cleaned from administrative thm nodes. * Code generator: config option "code_timing" triggers measurements of different phases of code generation. See src/HOL/ex/Code_Timing.thy for examples. * Code generator: implicits in Scala (stemming from type class instances) are generated into companion object of corresponding type class, to resolve some situations where ambiguities may occur. * Solve direct: option "solve_direct_strict_warnings" gives explicit warnings for lemma statements with trivial proofs. *** Prover IDE -- Isabelle/Scala/jEdit *** * More aggressive flushing of machine-generated input, according to system option editor_generated_input_delay (in addition to existing editor_input_delay for regular user edits). This may affect overall PIDE reactivity and CPU usage. * Syntactic indentation according to Isabelle outer syntax. Action "indent-lines" (shortcut C+i) indents the current line according to command keywords and some command substructure. Action "isabelle.newline" (shortcut ENTER) indents the old and the new line according to command keywords only; see also option "jedit_indent_newline". * Semantic indentation for unstructured proof scripts ('apply' etc.) via number of subgoals. This requires information of ongoing document processing and may thus lag behind, when the user is editing too quickly; see also option "jedit_script_indent" and "jedit_script_indent_limit".
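Returning to the 'bundle' and 'unbundle' commands above, a minimal sketch (all names are arbitrary):

  bundle order_syntax
  begin
  notation less_eq  (infix "\<preceq>" 50)
  end

  context
  begin
  unbundle order_syntax
  lemma "(x::nat) \<preceq> x" by simp
  end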
* Refined folding mode "isabelle" based on Isar syntax: 'next' and 'qed' are treated as delimiters for fold structure; 'begin' and 'end' structure of theory specifications is treated as well. * Command 'proof' provides information about proof outline with cases, e.g. for proof methods "cases", "induct", "goal_cases". * Completion templates for commands involving "begin ... end" blocks, e.g. 'context', 'notepad'. * Sidekick parser "isabelle-context" shows nesting of context blocks according to 'begin' and 'end' structure. * Highlighting of entity def/ref positions wrt. cursor. * Action "isabelle.select-entity" (shortcut CS+ENTER) selects all occurrences of the formal entity at the caret position. This facilitates systematic renaming. * PIDE document markup works across multiple Isar commands, e.g. the results established at the end of a proof are properly identified in the theorem statement. * Cartouche abbreviations work both for " and ` to accomodate typical situations where old ASCII notation may be updated. * Dockable window "Symbols" also provides access to 'abbrevs' from the outer syntax of the current theory buffer. This provides clickable syntax templates, including entries with empty abbrevs name (which are inaccessible via keyboard completion). * IDE support for the Isabelle/Pure bootstrap process, with the following independent stages: src/Pure/ROOT0.ML src/Pure/ROOT.ML src/Pure/Pure.thy src/Pure/ML_Bootstrap.thy The ML ROOT files act like quasi-theories in the context of theory ML_Bootstrap: this allows continuous checking of all loaded ML files. The theory files are presented with a modified header to import Pure from the running Isabelle instance. Results from changed versions of each stage are *not* propagated to the next stage, and isolated from the actual Isabelle/Pure that runs the IDE itself. The sequential dependencies of the above files are only observed for batch build. * Isabelle/ML and Standard ML files are presented in Sidekick with the tree structure of section headings: this special comment format is described in "implementation" chapter 0, e.g. (*** section ***). * Additional abbreviations for syntactic completion may be specified within the theory header as 'abbrevs'. The theory syntax for 'keywords' has been simplified accordingly: optional abbrevs need to go into the new 'abbrevs' section. * Global abbreviations via $ISABELLE_HOME/etc/abbrevs and $ISABELLE_HOME_USER/etc/abbrevs are no longer supported. Minor INCOMPATIBILITY, use 'abbrevs' within theory header instead. * Action "isabelle.keymap-merge" asks the user to resolve pending Isabelle keymap changes that are in conflict with the current jEdit keymap; non-conflicting changes are always applied implicitly. This action is automatically invoked on Isabelle/jEdit startup and thus increases chances that users see new keyboard shortcuts when re-using old keymaps. * ML and document antiquotations for file-systems paths are more uniform and diverse: @{path NAME} -- no file-system check @{file NAME} -- check for plain file @{dir NAME} -- check for directory Minor INCOMPATIBILITY, former uses of @{file} and @{file_unchecked} may have to be changed. *** Document preparation *** * New symbol \, e.g. for temporal operator. * New document and ML antiquotation @{locale} for locales, similar to existing antiquotation @{class}. * Mixfix annotations support delimiters like \<^control>\cartouche\ -- this allows special forms of document output. 
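Relating to the @{locale} antiquotation above, a hypothetical snippet (referring to an existing locale of the object-logic):

  text \<open>See also the locale @{locale order}.\<close>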
* Raw LaTeX output now works via \<^latex>\...\ instead of raw control symbol \<^raw:...>. INCOMPATIBILITY, notably for LaTeXsugar.thy and its derivatives. * \<^raw:...> symbols are no longer supported. * Old 'header' command is no longer supported (legacy since Isabelle2015). *** Isar *** * Many specification elements support structured statements with 'if' / 'for' eigen-context, e.g. 'axiomatization', 'abbreviation', 'definition', 'inductive', 'function'. * Toplevel theorem statements support eigen-context notation with 'if' / 'for' (in postfix), which corresponds to 'assumes' / 'fixes' in the traditional long statement form (in prefix). Local premises are called "that" or "assms", respectively. Empty premises are *not* bound in the context: INCOMPATIBILITY. * Command 'define' introduces a local (non-polymorphic) definition, with optional abstraction over local parameters. The syntax resembles 'definition' and 'obtain'. It fits better into the Isar language than old 'def', which is now a legacy feature. * Command 'obtain' supports structured statements with 'if' / 'for' context. * Command '\' is an alias for 'sorry', with different typesetting. E.g. to produce proof holes in examples and documentation. * The defining position of a literal fact \prop\ is maintained more carefully, and made accessible as hyperlink in the Prover IDE. * Commands 'finally' and 'ultimately' used to expose the result as literal fact: this accidental behaviour has been discontinued. Rare INCOMPATIBILITY, use more explicit means to refer to facts in Isar. * Command 'axiomatization' has become more restrictive to correspond better to internal axioms as singleton facts with mandatory name. Minor INCOMPATIBILITY. * Proof methods may refer to the main facts via the dynamic fact "method_facts". This is particularly useful for Eisbach method definitions. * Proof method "use" allows to modify the main facts of a given method expression, e.g. (use facts in simp) (use facts in \simp add: ...\) * The old proof method "default" has been removed (legacy since Isabelle2016). INCOMPATIBILITY, use "standard" instead. *** Pure *** * Pure provides basic versions of proof methods "simp" and "simp_all" that only know about meta-equality (==). Potential INCOMPATIBILITY in theory imports that merge Pure with e.g. Main of Isabelle/HOL: the order is relevant to avoid confusion of Pure.simp vs. HOL.simp. * The command 'unfolding' and proof method "unfold" include a second stage where given equations are passed through the attribute "abs_def" before rewriting. This ensures that definitions are fully expanded, regardless of the actual parameters that are provided. Rare INCOMPATIBILITY in some corner cases: use proof method (simp only:) instead, or declare [[unfold_abs_def = false]] in the proof context. * Type-inference improves sorts of newly introduced type variables for the object-logic, using its base sort (i.e. HOL.type for Isabelle/HOL). Thus terms like "f x" or "\x. P x" without any further syntactic context produce x::'a::type in HOL instead of x::'a::{} in Pure. Rare INCOMPATIBILITY, need to provide explicit type constraints for Pure types where this is really intended. *** HOL *** * New proof method "argo" using the built-in Argo solver based on SMT technology. 
The method can be used to prove goals of quantifier-free propositional logic, goals based on a combination of quantifier-free propositional logic with equality, and goals based on a combination of quantifier-free propositional logic with linear real arithmetic including min/max/abs. See HOL/ex/Argo_Examples.thy for examples. * The new "nunchaku" command integrates the Nunchaku model finder. The tool is experimental. See ~~/src/HOL/Nunchaku/Nunchaku.thy for details. * Metis: The problem encoding has changed very slightly. This might break existing proofs. INCOMPATIBILITY. * Sledgehammer: - The MaSh relevance filter is now faster than before. - Produce syntactically correct Vampire 4.0 problem files. * (Co)datatype package: - New commands for defining corecursive functions and reasoning about them in "~~/src/HOL/Library/BNF_Corec.thy": 'corec', 'corecursive', 'friend_of_corec', and 'corecursion_upto'; and 'corec_unique' proof method. See 'isabelle doc corec'. - The predicator :: ('a \ bool) \ 'a F \ bool is now a first-class citizen in bounded natural functors. - 'primrec' now allows nested calls through the predicator in addition to the map function. - 'bnf' automatically discharges reflexive proof obligations. - 'bnf' outputs a slightly modified proof obligation expressing rel in terms of map and set (not giving a specification for rel makes this one reflexive). - 'bnf' outputs a new proof obligation expressing pred in terms of set (not giving a specification for pred makes this one reflexive). INCOMPATIBILITY: manual 'bnf' declarations may need adjustment. - Renamed lemmas: rel_prod_apply ~> rel_prod_inject pred_prod_apply ~> pred_prod_inject INCOMPATIBILITY. - The "size" plugin has been made compatible again with locales. - The theorems about "rel" and "set" may have a slightly different (but equivalent) form. INCOMPATIBILITY. * The 'coinductive' command produces a proper coinduction rule for mutual coinductive predicates. This new rule replaces the old rule, which exposed details of the internal fixpoint construction and was hard to use. INCOMPATIBILITY. * New abbreviations for negated existence (but not bounded existence): \x. P x \ \ (\x. P x) \!x. P x \ \ (\!x. P x) * The print mode "HOL" for ASCII syntax of binders "!", "?", "?!", "@" has been removed for output. It is retained for input only, until it is eliminated altogether. * The unique existence quantifier no longer provides 'binder' syntax, but uses syntax translations (as for bounded unique existence). Thus iterated quantification \!x y. P x y with its slightly confusing sequential meaning \!x. \!y. P x y is no longer possible. Instead, pattern abstraction admits simultaneous unique existence \!(x, y). P x y (analogous to existing notation \!(x, y)\A. P x y). Potential INCOMPATIBILITY in rare situations. * Conventional syntax "%(). t" for unit abstractions. Slight syntactic INCOMPATIBILITY. * Renamed constants and corresponding theorems: setsum ~> sum setprod ~> prod listsum ~> sum_list listprod ~> prod_list INCOMPATIBILITY. 
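Referring back to the new negated-existence abbreviation above, a trivial example (sketch):

  lemma "\<nexists>x. x < (0::nat)"
    by simp

The statement abbreviates \<not> (\<exists>x. x < 0) and is solved by the default simplifier setup.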
* Slightly more standardized theorem names: sgn_times ~> sgn_mult sgn_mult' ~> Real_Vector_Spaces.sgn_mult divide_zero_left ~> div_0 zero_mod_left ~> mod_0 divide_zero ~> div_by_0 divide_1 ~> div_by_1 nonzero_mult_divide_cancel_left ~> nonzero_mult_div_cancel_left div_mult_self1_is_id ~> nonzero_mult_div_cancel_left nonzero_mult_divide_cancel_right ~> nonzero_mult_div_cancel_right div_mult_self2_is_id ~> nonzero_mult_div_cancel_right is_unit_divide_mult_cancel_left ~> is_unit_div_mult_cancel_left is_unit_divide_mult_cancel_right ~> is_unit_div_mult_cancel_right mod_div_equality ~> div_mult_mod_eq mod_div_equality2 ~> mult_div_mod_eq mod_div_equality3 ~> mod_div_mult_eq mod_div_equality4 ~> mod_mult_div_eq minus_div_eq_mod ~> minus_div_mult_eq_mod minus_div_eq_mod2 ~> minus_mult_div_eq_mod minus_mod_eq_div ~> minus_mod_eq_div_mult minus_mod_eq_div2 ~> minus_mod_eq_mult_div div_mod_equality' ~> minus_mod_eq_div_mult [symmetric] mod_div_equality' ~> minus_div_mult_eq_mod [symmetric] zmod_zdiv_equality ~> mult_div_mod_eq [symmetric] zmod_zdiv_equality' ~> minus_div_mult_eq_mod [symmetric] Divides.mult_div_cancel ~> minus_mod_eq_mult_div [symmetric] mult_div_cancel ~> minus_mod_eq_mult_div [symmetric] zmult_div_cancel ~> minus_mod_eq_mult_div [symmetric] div_1 ~> div_by_Suc_0 mod_1 ~> mod_by_Suc_0 INCOMPATIBILITY. * New type class "idom_abs_sgn" specifies algebraic properties of sign and absolute value functions. Type class "sgn_if" has disappeared. Slight INCOMPATIBILITY. * Dedicated syntax LENGTH('a) for length of types. * Characters (type char) are modelled as finite algebraic type corresponding to {0..255}. - Logical representation: * 0 is instantiated to the ASCII zero character. * All other characters are represented as "Char n" with n being a raw numeral expression less than 256. * Expressions of the form "Char n" with n greater than 255 are non-canonical. - Printing and parsing: * Printable characters are printed and parsed as "CHR ''\''" (as before). * The ASCII zero character is printed and parsed as "0". * All other canonical characters are printed as "CHR 0xXX" with XX being the hexadecimal character code. "CHR n" is parsable for every numeral expression n. * Non-canonical characters have no special syntax and are printed as their logical representation. - Explicit conversions from and to the natural numbers are provided as char_of_nat, nat_of_char (as before). - The auxiliary nibble type has been discontinued. INCOMPATIBILITY. * Type class "div" with operation "mod" renamed to type class "modulo" with operation "modulo", analogously to type class "divide". This eliminates the need to qualify any of those names in the presence of infix "mod" syntax. INCOMPATIBILITY. * Statements and proofs of Knaster-Tarski fixpoint combinators lfp/gfp have been clarified. The fixpoint properties are lfp_fixpoint, its symmetric lfp_unfold (as before), and the duals for gfp. Auxiliary items for the proof (lfp_lemma2 etc.) are no longer exported, but can be easily recovered by composition with eq_refl. Minor INCOMPATIBILITY. * Constant "surj" is a mere input abbreviation, to avoid hiding an equation in term output. Minor INCOMPATIBILITY. * Command 'code_reflect' accepts empty constructor lists for datatypes, which effectively renders those abstract. * Command 'export_code' checks given constants for abstraction violations: a small guarantee that given constants specify a safe interface for the generated code (see the example below). * Code generation for Scala: ambiguous implicits in class diagrams are spelt out explicitly.
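As a small illustration of the 'export_code' check above (a sketch; the constant and target language are arbitrary):

  export_code rev in SML

This generates and checks SML code for list reversal, without writing it to a file.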
* Static evaluators (Code_Evaluation.static_* in Isabelle/ML) rely on explicitly provided auxiliary definitions for required type class dictionaries rather than half-working magic. INCOMPATIBILITY, see the tutorial on code generation for details. * Theory Set_Interval: substantial new theorems on indexed sums and products. * Locale bijection establishes convenient default simp rules such as "inv f (f a) = a" for total bijections. * Abstract locales semigroup, abel_semigroup, semilattice, semilattice_neutr, ordering, ordering_top, semilattice_order, semilattice_neutr_order, comm_monoid_set, semilattice_set, semilattice_neutr_set, semilattice_order_set, semilattice_order_neutr_set, monoid_list, comm_monoid_list, comm_monoid_list_set, comm_monoid_mset, comm_monoid_fun use boldified syntax uniformly that does not clash with corresponding global syntax. INCOMPATIBILITY. * Former locale lifting_syntax is now a bundle, which is easier to include in a local context or theorem statement, e.g. "context includes lifting_syntax begin ... end". Minor INCOMPATIBILITY. * Some old / obsolete theorems have been renamed / removed, potential INCOMPATIBILITY. nat_less_cases -- removed, use linorder_cases instead inv_image_comp -- removed, use image_inv_f_f instead image_surj_f_inv_f ~> image_f_inv_f * Some theorems about groups and orders have been generalised from groups to semi-groups that are also monoids: le_add_same_cancel1 le_add_same_cancel2 less_add_same_cancel1 less_add_same_cancel2 add_le_same_cancel1 add_le_same_cancel2 add_less_same_cancel1 add_less_same_cancel2 * Some simplification theorems about rings have been removed, since superseded by a more general version: less_add_cancel_left_greater_zero ~> less_add_same_cancel1 less_add_cancel_right_greater_zero ~> less_add_same_cancel2 less_eq_add_cancel_left_greater_eq_zero ~> le_add_same_cancel1 less_eq_add_cancel_right_greater_eq_zero ~> le_add_same_cancel2 less_eq_add_cancel_left_less_eq_zero ~> add_le_same_cancel1 less_eq_add_cancel_right_less_eq_zero ~> add_le_same_cancel2 less_add_cancel_left_less_zero ~> add_less_same_cancel1 less_add_cancel_right_less_zero ~> add_less_same_cancel2 INCOMPATIBILITY. * Renamed split_if -> if_split and split_if_asm -> if_split_asm to resemble the f.split naming convention, INCOMPATIBILITY (see the example below). * Added class topological_monoid. * The following theorems have been renamed: setsum_left_distrib ~> sum_distrib_right setsum_right_distrib ~> sum_distrib_left INCOMPATIBILITY. * Compound constants INFIMUM and SUPREMUM are mere abbreviations now. INCOMPATIBILITY. * "Gcd (f ` A)" and "Lcm (f ` A)" are printed with optional comprehension-like syntax analogously to "Inf (f ` A)" and "Sup (f ` A)". * Class semiring_Lcd merged into semiring_Gcd. INCOMPATIBILITY. * The type class ordered_comm_monoid_add is now called ordered_cancel_comm_monoid_add. A new type class ordered_comm_monoid_add is introduced as the combination of ordered_ab_semigroup_add + comm_monoid_add. INCOMPATIBILITY. * Introduced the type classes canonically_ordered_comm_monoid_add and dioid. * Introduced the type class ordered_ab_semigroup_monoid_add_imp_le. When instantiating linordered_semiring_strict and ordered_ab_group_add, an explicit instantiation of ordered_ab_semigroup_monoid_add_imp_le might be required. INCOMPATIBILITY.
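For example, the renamed split rule can be used as follows (sketch):

  lemma "P (if b then x else y) = ((b \<longrightarrow> P x) \<and> (\<not> b \<longrightarrow> P y))"
    by (simp split: if_split)

Occurrences of "split: split_if" in existing proofs need to be updated accordingly.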
* Dropped various legacy fact bindings, whose replacements are often of a more general type also: lcm_left_commute_nat ~> lcm.left_commute lcm_left_commute_int ~> lcm.left_commute gcd_left_commute_nat ~> gcd.left_commute gcd_left_commute_int ~> gcd.left_commute gcd_greatest_iff_nat ~> gcd_greatest_iff gcd_greatest_iff_int ~> gcd_greatest_iff coprime_dvd_mult_nat ~> coprime_dvd_mult coprime_dvd_mult_int ~> coprime_dvd_mult zpower_numeral_even ~> power_numeral_even gcd_mult_cancel_nat ~> gcd_mult_cancel gcd_mult_cancel_int ~> gcd_mult_cancel div_gcd_coprime_nat ~> div_gcd_coprime div_gcd_coprime_int ~> div_gcd_coprime zpower_numeral_odd ~> power_numeral_odd zero_less_int_conv ~> of_nat_0_less_iff gcd_greatest_nat ~> gcd_greatest gcd_greatest_int ~> gcd_greatest coprime_mult_nat ~> coprime_mult coprime_mult_int ~> coprime_mult lcm_commute_nat ~> lcm.commute lcm_commute_int ~> lcm.commute int_less_0_conv ~> of_nat_less_0_iff gcd_commute_nat ~> gcd.commute gcd_commute_int ~> gcd.commute Gcd_insert_nat ~> Gcd_insert Gcd_insert_int ~> Gcd_insert of_int_int_eq ~> of_int_of_nat_eq lcm_least_nat ~> lcm_least lcm_least_int ~> lcm_least lcm_assoc_nat ~> lcm.assoc lcm_assoc_int ~> lcm.assoc int_le_0_conv ~> of_nat_le_0_iff int_eq_0_conv ~> of_nat_eq_0_iff Gcd_empty_nat ~> Gcd_empty Gcd_empty_int ~> Gcd_empty gcd_assoc_nat ~> gcd.assoc gcd_assoc_int ~> gcd.assoc zero_zle_int ~> of_nat_0_le_iff lcm_dvd2_nat ~> dvd_lcm2 lcm_dvd2_int ~> dvd_lcm2 lcm_dvd1_nat ~> dvd_lcm1 lcm_dvd1_int ~> dvd_lcm1 gcd_zero_nat ~> gcd_eq_0_iff gcd_zero_int ~> gcd_eq_0_iff gcd_dvd2_nat ~> gcd_dvd2 gcd_dvd2_int ~> gcd_dvd2 gcd_dvd1_nat ~> gcd_dvd1 gcd_dvd1_int ~> gcd_dvd1 int_numeral ~> of_nat_numeral lcm_ac_nat ~> ac_simps lcm_ac_int ~> ac_simps gcd_ac_nat ~> ac_simps gcd_ac_int ~> ac_simps abs_int_eq ~> abs_of_nat zless_int ~> of_nat_less_iff zdiff_int ~> of_nat_diff zadd_int ~> of_nat_add int_mult ~> of_nat_mult int_Suc ~> of_nat_Suc inj_int ~> inj_of_nat int_1 ~> of_nat_1 int_0 ~> of_nat_0 Lcm_empty_nat ~> Lcm_empty Lcm_empty_int ~> Lcm_empty Lcm_insert_nat ~> Lcm_insert Lcm_insert_int ~> Lcm_insert comp_fun_idem_gcd_nat ~> comp_fun_idem_gcd comp_fun_idem_gcd_int ~> comp_fun_idem_gcd comp_fun_idem_lcm_nat ~> comp_fun_idem_lcm comp_fun_idem_lcm_int ~> comp_fun_idem_lcm Lcm_eq_0 ~> Lcm_eq_0_I Lcm0_iff ~> Lcm_0_iff Lcm_dvd_int ~> Lcm_least divides_mult_nat ~> divides_mult divides_mult_int ~> divides_mult lcm_0_nat ~> lcm_0_right lcm_0_int ~> lcm_0_right lcm_0_left_nat ~> lcm_0_left lcm_0_left_int ~> lcm_0_left dvd_gcd_D1_nat ~> dvd_gcdD1 dvd_gcd_D1_int ~> dvd_gcdD1 dvd_gcd_D2_nat ~> dvd_gcdD2 dvd_gcd_D2_int ~> dvd_gcdD2 coprime_dvd_mult_iff_nat ~> coprime_dvd_mult_iff coprime_dvd_mult_iff_int ~> coprime_dvd_mult_iff realpow_minus_mult ~> power_minus_mult realpow_Suc_le_self ~> power_Suc_le_self dvd_Gcd, dvd_Gcd_nat, dvd_Gcd_int removed in favour of Gcd_greatest INCOMPATIBILITY. * Renamed HOL/Quotient_Examples/FSet.thy to HOL/Quotient_Examples/Quotient_FSet.thy INCOMPATIBILITY. * Session HOL-Library: theory FinFun bundles "finfun_syntax" and "no_finfun_syntax" allow to control optional syntax in local contexts; this supersedes former theory FinFun_Syntax. INCOMPATIBILITY, e.g. use "unbundle finfun_syntax" to imitate import of "~~/src/HOL/Library/FinFun_Syntax". * Session HOL-Library: theory Multiset_Permutations (executably) defines the set of permutations of a given set or multiset, i.e. the set of all lists that contain every element of the carrier (multi-)set exactly once. 
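For example (a sketch), the former import of theory FinFun_Syntax mentioned above can be imitated like this:

  theory Scratch
    imports Main "~~/src/HOL/Library/FinFun"
  begin

  unbundle finfun_syntax

  end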
* Session HOL-Library: multiset membership is now expressed using set_mset rather than count (see the example below). - Expressions "count M a > 0" and similar simplify to membership by default. - Converting between "count M a = 0" and non-membership happens using equations count_eq_zero_iff and not_in_iff. - Rules count_inI and in_countE obtain facts of the form "count M a = n" from membership. - Rules count_in_diffI and in_diff_countE obtain facts of the form "count M a = n + count N a" from membership on difference sets. INCOMPATIBILITY. * Session HOL-Library: theory LaTeXsugar uses new-style "dummy_pats" for displaying equations in functional programming style --- variables present on the left-hand but not on the right-hand side are replaced by underscores. * Session HOL-Library: theory Combinator_PER provides a combinator to build partial equivalence relations from a predicate and an equivalence relation. * Session HOL-Library: theory Perm provides basic facts about almost everywhere fix bijections. * Session HOL-Library: theory Normalized_Fraction allows viewing an element of a field of fractions as a normalized fraction (i.e. a pair of numerator and denominator such that the two are coprime and the denominator is normalized wrt. unit factors). * Session HOL-NSA has been renamed to HOL-Nonstandard_Analysis. * Session HOL-Multivariate_Analysis has been renamed to HOL-Analysis. * Session HOL-Analysis: measure theory has been moved here from HOL-Probability. When importing HOL-Analysis some theorems need additional name space prefixes due to name clashes. INCOMPATIBILITY. * Session HOL-Analysis: more complex analysis including Cauchy's inequality, Liouville theorem, open mapping theorem, maximum modulus principle, Residue theorem, Schwarz Lemma. * Session HOL-Analysis: Theory of polyhedra: faces, extreme points, polytopes, and the Krein–Milman Minkowski theorem. * Session HOL-Analysis: Numerous results ported from the HOL Light libraries: homeomorphisms, continuous function extensions, invariance of domain. * Session HOL-Probability: the type of emeasure and nn_integral was changed from ereal to ennreal, INCOMPATIBILITY. emeasure :: 'a measure \ 'a set \ ennreal nn_integral :: 'a measure \ ('a \ ennreal) \ ennreal * Session HOL-Probability: Code generation and QuickCheck for Probability Mass Functions. * Session HOL-Probability: theory Random_Permutations contains some theory about choosing a permutation of a set uniformly at random and folding over a list in random order. * Session HOL-Probability: theory SPMF formalises discrete subprobability distributions.
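A tiny example of the membership-based setup above (a sketch, within a theory importing "~~/src/HOL/Library/Multiset"):

  lemma "a \<in># {#a, b#}"
    by simp

By default, the equivalent statement "count {#a, b#} a > 0" simplifies to this membership form.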
* Session HOL-Library: the names of multiset theorems have been normalised to distinguish which ordering the theorems are about mset_less_eqI ~> mset_subset_eqI mset_less_insertD ~> mset_subset_insertD mset_less_eq_count ~> mset_subset_eq_count mset_less_diff_self ~> mset_subset_diff_self mset_le_exists_conv ~> mset_subset_eq_exists_conv mset_le_mono_add_right_cancel ~> mset_subset_eq_mono_add_right_cancel mset_le_mono_add_left_cancel ~> mset_subset_eq_mono_add_left_cancel mset_le_mono_add ~> mset_subset_eq_mono_add mset_le_add_left ~> mset_subset_eq_add_left mset_le_add_right ~> mset_subset_eq_add_right mset_le_single ~> mset_subset_eq_single mset_le_multiset_union_diff_commute ~> mset_subset_eq_multiset_union_diff_commute diff_le_self ~> diff_subset_eq_self mset_leD ~> mset_subset_eqD mset_lessD ~> mset_subsetD mset_le_insertD ~> mset_subset_eq_insertD mset_less_of_empty ~> mset_subset_of_empty mset_less_size ~> mset_subset_size wf_less_mset_rel ~> wf_subset_mset_rel count_le_replicate_mset_le ~> count_le_replicate_mset_subset_eq mset_remdups_le ~> mset_remdups_subset_eq ms_lesseq_impl ~> subset_eq_mset_impl Some functions have been renamed: ms_lesseq_impl -> subset_eq_mset_impl * HOL-Library: multisets are now ordered with the multiset ordering #\# ~> \ #\# ~> < le_multiset ~> less_eq_multiset less_multiset ~> le_multiset INCOMPATIBILITY. * Session HOL-Library: the prefix multiset_order has been discontinued: the theorems can be directly accessed. As a consequence, the lemmas "order_multiset" and "linorder_multiset" have been discontinued, and the interpretations "multiset_linorder" and "multiset_wellorder" have been replaced by instantiations. INCOMPATIBILITY. * Session HOL-Library: some theorems about the multiset ordering have been renamed: le_multiset_def ~> less_eq_multiset_def less_multiset_def ~> le_multiset_def less_eq_imp_le_multiset ~> subset_eq_imp_le_multiset mult_less_not_refl ~> mset_le_not_refl mult_less_trans ~> mset_le_trans mult_less_not_sym ~> mset_le_not_sym mult_less_asym ~> mset_le_asym mult_less_irrefl ~> mset_le_irrefl union_less_mono2{,1,2} ~> union_le_mono2{,1,2} le_multiset\<^sub>H\<^sub>O ~> less_eq_multiset\<^sub>H\<^sub>O le_multiset_total ~> less_eq_multiset_total less_multiset_right_total ~> subset_eq_imp_le_multiset le_multiset_empty_left ~> less_eq_multiset_empty_left le_multiset_empty_right ~> less_eq_multiset_empty_right less_multiset_empty_right ~> le_multiset_empty_left less_multiset_empty_left ~> le_multiset_empty_right union_less_diff_plus ~> union_le_diff_plus ex_gt_count_imp_less_multiset ~> ex_gt_count_imp_le_multiset less_multiset_plus_left_nonempty ~> le_multiset_plus_left_nonempty le_multiset_plus_right_nonempty ~> le_multiset_plus_right_nonempty INCOMPATIBILITY. * Session HOL-Library: the lemma mset_map has now the attribute [simp]. INCOMPATIBILITY. * Session HOL-Library: some theorems about multisets have been removed. 
INCOMPATIBILITY, use the following replacements: le_multiset_plus_plus_left_iff ~> add_less_cancel_right less_multiset_plus_plus_left_iff ~> add_less_cancel_right le_multiset_plus_plus_right_iff ~> add_less_cancel_left less_multiset_plus_plus_right_iff ~> add_less_cancel_left add_eq_self_empty_iff ~> add_cancel_left_right mset_subset_add_bothsides ~> subset_mset.add_less_cancel_right mset_less_add_bothsides ~> subset_mset.add_less_cancel_right mset_le_add_bothsides ~> subset_mset.add_less_cancel_right empty_inter ~> subset_mset.inf_bot_left inter_empty ~> subset_mset.inf_bot_right empty_sup ~> subset_mset.sup_bot_left sup_empty ~> subset_mset.sup_bot_right bdd_below_multiset ~> subset_mset.bdd_above_bot subset_eq_empty ~> subset_mset.le_zero_eq le_empty ~> subset_mset.le_zero_eq mset_subset_empty_nonempty ~> subset_mset.zero_less_iff_neq_zero mset_less_empty_nonempty ~> subset_mset.zero_less_iff_neq_zero * Session HOL-Library: some typeclass constraints about multisets have been reduced from ordered or linordered to preorder. Multisets have the additional typeclasses order_bot, no_top, ordered_ab_semigroup_add_imp_le, ordered_cancel_comm_monoid_add, linordered_cancel_ab_semigroup_add, and ordered_ab_semigroup_monoid_add_imp_le. INCOMPATIBILITY. * Session HOL-Library: there are some new simplification rules about multisets, the multiset ordering, and the subset ordering on multisets. INCOMPATIBILITY. * Session HOL-Library: the subset ordering on multisets has now the interpretations ordered_ab_semigroup_monoid_add_imp_le and bounded_lattice_bot. INCOMPATIBILITY. * Session HOL-Library, theory Multiset: single has been removed in favor of add_mset that roughly corresponds to Set.insert (see the example below). Some theorems have been removed or changed: single_not_empty ~> add_mset_not_empty or empty_not_add_mset fold_mset_insert ~> fold_mset_add_mset image_mset_insert ~> image_mset_add_mset union_single_eq_diff multi_self_add_other_not_self diff_single_eq_union INCOMPATIBILITY. * Session HOL-Library, theory Multiset: some theorems have been changed to use add_mset instead of single: mset_add multi_self_add_other_not_self diff_single_eq_union union_single_eq_diff union_single_eq_member add_eq_conv_diff insert_noteq_member add_eq_conv_ex multi_member_split multiset_add_sub_el_shuffle mset_subset_eq_insertD mset_subset_insertD insert_subset_eq_iff insert_union_subset_iff multi_psub_of_add_self inter_add_left1 inter_add_left2 inter_add_right1 inter_add_right2 sup_union_left1 sup_union_left2 sup_union_right1 sup_union_right2 size_eq_Suc_imp_eq_union multi_nonempty_split mset_insort mset_update mult1I less_add mset_zip_take_Cons_drop_twice rel_mset_Zero msed_map_invL msed_map_invR msed_rel_invL msed_rel_invR le_multiset_right_total multiset_induct multiset_induct2_size multiset_induct2 INCOMPATIBILITY. * Session HOL-Library, theory Multiset: the definitions of some constants have changed to use add_mset instead of adding a single element: image_mset mset replicate_mset mult1 pred_mset rel_mset' mset_insort INCOMPATIBILITY.
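A trivial use of the new add_mset constant (a sketch, within a theory importing Multiset):

  lemma "add_mset a M \<noteq> {#}"
    by simp

This corresponds to the former fact single_not_empty about the discontinued "single".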
* Session HOL-Library, theory Multiset: due to the above changes, the attributes of some multiset theorems have been changed: insert_DiffM [] ~> [simp] insert_DiffM2 [simp] ~> [] diff_add_mset_swap [simp] fold_mset_add_mset [simp] diff_diff_add [simp] (for multisets only) diff_cancel [simp] ~> [] count_single [simp] ~> [] set_mset_single [simp] ~> [] size_multiset_single [simp] ~> [] size_single [simp] ~> [] image_mset_single [simp] ~> [] mset_subset_eq_mono_add_right_cancel [simp] ~> [] mset_subset_eq_mono_add_left_cancel [simp] ~> [] fold_mset_single [simp] ~> [] subset_eq_empty [simp] ~> [] empty_sup [simp] ~> [] sup_empty [simp] ~> [] inter_empty [simp] ~> [] empty_inter [simp] ~> [] INCOMPATIBILITY. * Session HOL-Library, theory Multiset: the order of the variables in the second cases of multiset_induct, multiset_induct2_size, multiset_induct2 has been changed (e.g. Add A a ~> Add a A). INCOMPATIBILITY. * Session HOL-Library, theory Multiset: there is now a simplification procedure on multisets. It mimics the behavior of the procedure on natural numbers. INCOMPATIBILITY. * Session HOL-Library, theory Multiset: renamed sums and products of multisets: msetsum ~> sum_mset msetprod ~> prod_mset * Session HOL-Library, theory Multiset: the notation for intersection and union of multisets has been changed: #\ ~> \# #\ ~> \# INCOMPATIBILITY. * Session HOL-Library, theory Multiset: the lemma one_step_implies_mult_aux on multisets has been removed, use one_step_implies_mult instead. INCOMPATIBILITY. * Session HOL-Library: theory Complete_Partial_Order2 provides reasoning support for monotonicity and continuity in chain-complete partial orders and about admissibility conditions for fixpoint inductions. * Session HOL-Library: theory Library/Polynomial contains also derivation of polynomials (formerly in Library/Poly_Deriv) but not gcd/lcm on polynomials over fields. This has been moved to a separate theory Library/Polynomial_GCD_euclidean.thy, to pave the way for a possible future different type class instantiation for polynomials over factorial rings. INCOMPATIBILITY. * Session HOL-Library: theory Sublist provides function "prefixes" with the following renaming prefixeq -> prefix prefix -> strict_prefix suffixeq -> suffix suffix -> strict_suffix Added theory of longest common prefixes. * Session HOL-Number_Theory: algebraic foundation for primes: Generalisation of predicate "prime" and introduction of predicates "prime_elem", "irreducible", a "prime_factorization" function, and the "factorial_ring" typeclass with instance proofs for nat, int, poly. Some theorems now have different names, most notably "prime_def" is now "prime_nat_iff". INCOMPATIBILITY. * Session Old_Number_Theory has been removed, after porting remaining theories. * Session HOL-Types_To_Sets provides an experimental extension of Higher-Order Logic to allow translation of types to sets. *** ML *** * Integer.gcd and Integer.lcm use efficient operations from the Poly/ML library (notably for big integers). Subtle change of semantics: Integer.gcd and Integer.lcm both normalize the sign, results are never negative. This coincides with the definitions in HOL/GCD.thy. INCOMPATIBILITY. * Structure Rat for rational numbers is now an integral part of Isabelle/ML, with special notation @int/nat or @int for numerals (an abbreviation for antiquotation @{Pure.rat argument}) and ML pretty printing. Standard operations on type Rat.rat are provided via ad-hoc overloading of + - * / < <= > >= ~ abs. INCOMPATIBILITY, need to use + instead of +/ etc.
Moreover, exception Rat.DIVZERO has been superseded by General.Div. * ML antiquotation @{path} is superseded by @{file}, which ensures that the argument is a plain file. Minor INCOMPATIBILITY. * Antiquotation @{make_string} is available during Pure bootstrap -- with approximative output quality. * Low-level ML system structures (like PolyML and RunCall) are no longer exposed to Isabelle/ML user-space. Potential INCOMPATIBILITY. * The ML function "ML" provides easy access to run-time compilation. This is particularly useful for conditional compilation, without requiring separate files. * Option ML_exception_debugger controls detailed exception trace via the Poly/ML debugger. Relevant ML modules need to be compiled beforehand with ML_file_debug, or with ML_file and option ML_debugger enabled. Note debugger information requires considerable time and space: main Isabelle/HOL with full debugger support may need ML_system_64. * Local_Theory.restore has been renamed to Local_Theory.reset to emphasize its disruptive impact on the cumulative context, notably the scope of 'private' or 'qualified' names. Note that Local_Theory.reset is only appropriate when targets are managed, e.g. starting from a global theory and returning to it. Regular definitional packages should use balanced blocks of Local_Theory.open_target versus Local_Theory.close_target instead. Rare INCOMPATIBILITY. * Structure TimeLimit (originally from the SML/NJ library) has been replaced by structure Timeout, with slightly different signature. INCOMPATIBILITY. * Discontinued cd and pwd operations, which are not well-defined in a multi-threaded environment. Note that files are usually located relative to the master directory of a theory (see also File.full_path). Potential INCOMPATIBILITY. * Binding.empty_atts supersedes Thm.empty_binding and Attrib.empty_binding. Minor INCOMPATIBILITY. *** System *** * SML/NJ and old versions of Poly/ML are no longer supported. * Poly/ML heaps now follow the hierarchy of sessions, and thus require much less disk space. * The Isabelle ML process is now managed directly by Isabelle/Scala, and shell scripts merely provide optional command-line access. In particular: . Scala module ML_Process to connect to the raw ML process, with interaction via stdin/stdout/stderr or in batch mode; . command-line tool "isabelle console" as interactive wrapper; . command-line tool "isabelle process" as batch mode wrapper. * The executable "isabelle_process" has been discontinued. Tools and prover front-ends should use ML_Process or Isabelle_Process in Isabelle/Scala. INCOMPATIBILITY. * New command-line tool "isabelle process" supports ML evaluation of literal expressions (option -e) or files (option -f) in the context of a given heap image. Errors lead to premature exit of the ML process with return code 1. * The command-line tool "isabelle build" supports option -N for cyclic shuffling of NUMA CPU nodes. This may help performance tuning on Linux servers with separate CPU/memory modules. * System option "threads" (for the size of the Isabelle/ML thread farm) is also passed to the underlying ML runtime system as --gcthreads, unless there is already a default provided via ML_OPTIONS settings. * System option "checkpoint" helps to fine-tune the global heap space management of isabelle build. This is relevant for big sessions that may exhaust the small 32-bit address space of the ML process (which is used by default). * System option "profiling" specifies the mode for global ML profiling in "isabelle build".
Possible values are "time", "allocations". The command-line tool "isabelle profiling_report" helps to digest the resulting log files. * System option "ML_process_policy" specifies an optional command prefix for the underlying ML process, e.g. to control CPU affinity on multiprocessor systems. The "isabelle jedit" tool allows to override the implicit default via option -p. * Command-line tool "isabelle console" provides option -r to help to bootstrap Isabelle/Pure interactively. * Command-line tool "isabelle yxml" has been discontinued. INCOMPATIBILITY, use operations from the modules "XML" and "YXML" in Isabelle/ML or Isabelle/Scala. * Many Isabelle tools that require a Java runtime system refer to the settings ISABELLE_TOOL_JAVA_OPTIONS32 / ISABELLE_TOOL_JAVA_OPTIONS64, depending on the underlying platform. The settings for "isabelle build" ISABELLE_BUILD_JAVA_OPTIONS32 / ISABELLE_BUILD_JAVA_OPTIONS64 have been discontinued. Potential INCOMPATIBILITY. * The Isabelle system environment always ensures that the main executables are found within the shell search $PATH: "isabelle" and "isabelle_scala_script". * Isabelle tools may consist of .scala files: the Scala compiler is invoked on the spot. The source needs to define some object that extends Isabelle_Tool.Body. * File.bash_string, File.bash_path etc. represent Isabelle/ML and Isabelle/Scala strings authentically within GNU bash. This is useful to produce robust shell scripts under program control, without worrying about spaces or special characters. Note that user output works via Path.print (ML) or Path.toString (Scala). INCOMPATIBILITY, the old (and less versatile) operations File.shell_quote, File.shell_path etc. have been discontinued. * The isabelle_java executable allows to run a Java process within the name space of Java and Scala components that are bundled with Isabelle, but without the Isabelle settings environment. * Isabelle/Scala: the SSH module supports ssh and sftp connections, for remote command-execution and file-system access. This resembles operations from module File and Isabelle_System to some extent. Note that Path specifications need to be resolved remotely via ssh.remote_path instead of File.standard_path: the implicit process environment is different, Isabelle settings are not available remotely. * Isabelle/Scala: the Mercurial module supports repositories via the regular hg command-line interface. The repository clone and working directory may reside on a local or remote file-system (via ssh connection). New in Isabelle2016 (February 2016) ----------------------------------- *** General *** * Eisbach is now based on Pure instead of HOL. Object-logics may import either the theory ~~/src/HOL/Eisbach/Eisbach (for HOL etc.) or ~~/src/HOL/Eisbach/Eisbach_Old_Appl_Syntax (for FOL, ZF etc.). Note that the HOL-Eisbach session located in ~~/src/HOL/Eisbach/ contains further examples that do require HOL. * Better resource usage on all platforms (Linux, Windows, Mac OS X) for both Isabelle/ML and Isabelle/Scala. Slightly reduced heap space usage. * Former "xsymbols" syntax with Isabelle symbols is used by default, without any special print mode. Important ASCII replacement syntax remains available under print mode "ASCII", but less important syntax has been removed (see below). * Support for more arrow symbols, with rendering in LaTeX and Isabelle fonts: \ \ \ \ \ \. * Special notation \ for the first implicit 'structure' in the context has been discontinued.
Rare INCOMPATIBILITY, use explicit structure name instead, notably in indexed notation with block-subscript (e.g. \\<^bsub>A\<^esub>). * The glyph for \ in the IsabelleText font now corresponds better to its counterpart \ as quantifier-like symbol. A small diamond is available as \; the old symbol \ loses this rendering and any special meaning. * Syntax for formal comments "-- text" now also supports the symbolic form "\ text". Command-line tool "isabelle update_cartouches -c" helps to update old sources. * Toplevel theorem statements have been simplified as follows: theorems ~> lemmas schematic_lemma ~> schematic_goal schematic_theorem ~> schematic_goal schematic_corollary ~> schematic_goal Command-line tool "isabelle update_theorems" updates theory sources accordingly. * Toplevel theorem statement 'proposition' is another alias for 'theorem'. * The old 'defs' command has been removed (legacy since Isabelle2014). INCOMPATIBILITY, use regular 'definition' instead. Overloaded and/or deferred definitions require a surrounding 'overloading' block. *** Prover IDE -- Isabelle/Scala/jEdit *** * IDE support for the source-level debugger of Poly/ML, to work with Isabelle/ML and official Standard ML. Option "ML_debugger" and commands 'ML_file_debug', 'ML_file_no_debug', 'SML_file_debug', 'SML_file_no_debug' control compilation of sources with or without debugging information. The Debugger panel allows to set breakpoints (via context menu), step through stopped threads, evaluate local ML expressions etc. At least one Debugger view needs to be active to have any effect on the running ML program. * The State panel manages explicit proof state output, with dynamic auto-update according to cursor movement. Alternatively, the jEdit action "isabelle.update-state" (shortcut S+ENTER) triggers manual update. * The Output panel no longer shows proof state output by default, to avoid GUI overcrowding. INCOMPATIBILITY, use the State panel instead or enable option "editor_output_state". * The text overview column (status of errors, warnings etc.) is updated asynchronously, leading to much better editor reactivity. Moreover, the full document node content is taken into account. The width of the column is scaled according to the main text area font, for improved visibility. * The main text area no longer changes its color hue in outdated situations. The text overview column takes over the role to indicate unfinished edits in the PIDE pipeline. This avoids flashing text display due to ad-hoc updates by auxiliary GUI components, such as the State panel. * Slightly improved scheduling for urgent print tasks (e.g. command state output, interactive queries) wrt. long-running background tasks. * Completion of symbols via prefix of \ or \<^name> or \name is always possible, independently of the language context. It is never implicit: a popup will show up unconditionally. * Additional abbreviations for syntactic completion may be specified in $ISABELLE_HOME/etc/abbrevs and $ISABELLE_HOME_USER/etc/abbrevs, with support for simple templates using ASCII 007 (bell) as placeholder. * Symbols \, \, \, \, \, \, \, \ no longer provide abbreviations for completion like "+o", "*o", ".o" etc. -- due to conflicts with other ASCII syntax. INCOMPATIBILITY, use plain backslash-completion or define suitable abbreviations in $ISABELLE_HOME_USER/etc/abbrevs. * Action "isabelle-emph" (with keyboard shortcut C+e LEFT) controls emphasized text style; the effect is visible in document output, not in the editor. 
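For instance, the "isabelle update_cartouches -c" tool mentioned above can update old sources in place (a sketch; the directory argument is hypothetical):

  isabelle update_cartouches -c My_Project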
* Action "isabelle-reset" now uses keyboard shortcut C+e BACK_SPACE, instead of former C+e LEFT. * The command-line tool "isabelle jedit" and the isabelle.Main application wrapper treat the default $USER_HOME/Scratch.thy more uniformly, and allow the dummy file argument ":" to open an empty buffer instead. * New command-line tool "isabelle jedit_client" allows to connect to an already running Isabelle/jEdit process. This achieves the effect of single-instance applications seen on common GUI desktops. * The default look-and-feel for Linux is the traditional "Metal", which works better with GUI scaling for very high-resolution displays (e.g. 4K). Moreover, it is generally more robust than "Nimbus". * Update to jedit-5.3.0, with improved GUI scaling and support of high-resolution displays (e.g. 4K). * The main Isabelle executable is managed as single-instance Desktop application uniformly on all platforms: Linux, Windows, Mac OS X. *** Document preparation *** * Commands 'paragraph' and 'subparagraph' provide additional section headings. Thus there are 6 levels of standard headings, as in HTML. * Command 'text_raw' has been clarified: input text is processed as in 'text' (with antiquotations and control symbols). The key difference is the lack of the surrounding isabelle markup environment in output. * Text is structured in paragraphs and nested lists, using notation that is similar to Markdown. The control symbols for list items are as follows: \<^item> itemize \<^enum> enumerate \<^descr> description * There is a new short form for antiquotations with a single argument that is a cartouche: \<^name>\...\ is equivalent to @{name \...\} and \...\ without control symbol is equivalent to @{cartouche \...\}. \<^name> without following cartouche is equivalent to @{name}. The standard Isabelle fonts provide glyphs to render important control symbols, e.g. "\<^verbatim>", "\<^emph>", "\<^bold>". * Antiquotations @{noindent}, @{smallskip}, @{medskip}, @{bigskip} with corresponding control symbols \<^noindent>, \<^smallskip>, \<^medskip>, \<^bigskip> specify spacing formally, using standard LaTeX macros of the same names. * Antiquotation @{cartouche} in Isabelle/Pure is the same as @{text}. Consequently, \...\ without any decoration prints literal quasi-formal text. Command-line tool "isabelle update_cartouches -t" helps to update old sources, by approximative patching of the content of string and cartouche tokens seen in theory sources. * The @{text} antiquotation now ignores the antiquotation option "source". The given text content is output unconditionally, without any surrounding quotes etc. Subtle INCOMPATIBILITY, put quotes into the argument where they are really intended, e.g. @{text \"foo"\}. Initial or terminal spaces are ignored. * Antiquotations @{emph} and @{bold} output LaTeX source recursively, adding appropriate text style markup. These may be used in the short form \<^emph>\...\ and \<^bold>\...\. * Document antiquotation @{footnote} outputs LaTeX source recursively, marked as \footnote{}. This may be used in the short form \<^footnote>\...\. * Antiquotation @{verbatim [display]} supports option "indent". * Antiquotation @{theory_text} prints uninterpreted theory source text (Isar outer syntax with command keywords etc.). This may be used in the short form \<^theory_text>\...\. @{theory_text [display]} supports option "indent". * Antiquotation @{doc ENTRY} provides a reference to the given documentation, with a hyperlink in the Prover IDE. 
* Antiquotations @{command}, @{method}, @{attribute} print checked entities of the Isar language. * HTML presentation uses the standard IsabelleText font and Unicode rendering of Isabelle symbols like Isabelle/Scala/jEdit. The former print mode "HTML" loses its special meaning. *** Isar *** * Local goals ('have', 'show', 'hence', 'thus') allow structured rule statements like fixes/assumes/shows in theorem specifications, but the notation is postfix with keywords 'if' (or 'when') and 'for'. For example: have result: "C x y" if "A x" and "B y" for x :: 'a and y :: 'a The local assumptions are bound to the name "that". The result is exported from the context of the statement as usual. The above roughly corresponds to a raw proof block like this: { fix x :: 'a and y :: 'a assume that: "A x" "B y" have "C x y" } note result = this The keyword 'when' may be used instead of 'if', to indicate 'presume' instead of 'assume' above. * Assumptions ('assume', 'presume') allow structured rule statements using 'if' and 'for', similar to 'have' etc. above. For example: assume result: "C x y" if "A x" and "B y" for x :: 'a and y :: 'a This assumes "\x y::'a. A x \ B y \ C x y" and produces a general result as usual: "A ?x \ B ?y \ C ?x ?y". Vacuous quantification in assumptions is omitted, i.e. a for-context only affects propositions according to actual use of variables. For example: assume "A x" and "B y" for x and y is equivalent to: assume "\x. A x" and "\y. B y" * The meaning of 'show' with Pure rule statements has changed: premises are treated in the sense of 'assume', instead of 'presume'. This means, a goal like "\x. A x \ B x \ C x" can be solved completely as follows: show "\x. A x \ B x \ C x" or: show "C x" if "A x" "B x" for x Rare INCOMPATIBILITY, the old behaviour may be recovered as follows: show "C x" when "A x" "B x" for x * New command 'consider' states rules for generalized elimination and case splitting. This is like a toplevel statement "theorem obtains" used within a proof body; or like a multi-branch 'obtain' without activation of the local context elements yet. * Proof method "cases" allows to specify the rule as first entry of chained facts. This is particularly useful with 'consider': consider (a) A | (b) B | (c) C then have something proof cases case a then show ?thesis next case b then show ?thesis next case c then show ?thesis qed * Command 'case' allows fact name and attribute specification like this: case a: (c xs) case a [attributes]: (c xs) Facts that are introduced by invoking the case context are uniformly qualified by "a"; the same name is used for the cumulative fact. The old form "case (c xs) [attributes]" is no longer supported. Rare INCOMPATIBILITY, need to adapt uses of case facts in exotic situations, and always put attributes in front (see the example below). * The standard proof method of commands 'proof' and '..' is now called "standard" to make semantically clear what it is; the old name "default" is still available as legacy for some time. Documentation now explains '..' more accurately as "by standard" instead of "by rule". * Nesting of Isar goal structure has been clarified: the context after the initial backwards refinement is retained for the whole proof, within all its context sections (as indicated via 'next'). This is e.g. relevant for 'using', 'including', 'supply': have "A \ A" if a: A for A supply [simp] = a proof show A by simp next show A by simp qed * Command 'obtain' binds term abbreviations (via 'is' patterns) in the proof body as well, abstracted over relevant parameters.
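A concrete instance of the new 'case' naming (sketch):

  lemma "length xs = 0 \<or> 0 < length xs"
  proof (cases xs)
    case Nil
    then show ?thesis by simp
  next
    case c: (Cons y ys)
    then show ?thesis by simp
  qed

In the second branch the local fact "xs = y # ys" is available under the name c.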
* Improved type-inference for theorem statement 'obtains': separate parameter scope for each clause. * Term abbreviations via 'is' patterns also work for schematic statements: result is abstracted over unknowns. * Command 'subgoal' allows to impose some structure on backward refinements, to avoid proof scripts degenerating into long 'apply' sequences. Further explanations and examples are given in the isar-ref manual. * Command 'supply' supports fact definitions during goal refinement ('apply' scripts). * Proof method "goal_cases" turns the current subgoals into cases within the context; the conclusion is bound to variable ?case in each case. For example: lemma "\x. A x \ B x \ C x" and "\y z. U y \ V z \ W y z" proof goal_cases case (1 x) then show ?case using \A x\ \B x\ sorry next case (2 y z) then show ?case using \U y\ \V z\ sorry qed lemma "\x. A x \ B x \ C x" and "\y z. U y \ V z \ W y z" proof goal_cases case prems: 1 then show ?case using prems sorry next case prems: 2 then show ?case using prems sorry qed * The undocumented feature of implicit cases goal1, goal2, goal3, etc. is marked as legacy, and will be removed eventually. The proof method "goals" achieves a similar effect within regular Isar; often it can be done more adequately by other means (e.g. 'consider'). * The vacuous fact "TERM x" may be established "by fact" or as `TERM x` as well, not just "by this" or "." as before. * Method "sleep" succeeds after a real-time delay (in seconds). This is occasionally useful for demonstration and testing purposes. *** Pure *** * Qualifiers in locale expressions default to mandatory ('!') regardless of the command. Previously, for 'locale' and 'sublocale' the default was optional ('?'). The old syntax '!' has been discontinued. INCOMPATIBILITY, remove '!' and add '?' as required. * Keyword 'rewrites' identifies rewrite morphisms in interpretation commands. Previously, the keyword was 'where'. INCOMPATIBILITY. * More gentle suppression of syntax along locale morphisms while printing terms. Previously 'abbreviation' and 'notation' declarations would be suppressed for morphisms except term identity. Now 'abbreviation' is also kept for morphisms that only change the involved parameters, and only 'notation' is suppressed. This can be of great help when working with complex locale hierarchies, because proof states are displayed much more succinctly. It also means that only notation needs to be redeclared if desired, as illustrated by this example: locale struct = fixes composition :: "'a => 'a => 'a" (infixl "\" 65) begin definition derived (infixl "\" 65) where ... end locale morphism = left: struct composition + right: struct composition' for composition (infix "\" 65) and composition' (infix "\''" 65) begin notation right.derived ("\''") end * Command 'global_interpretation' issues interpretations into global theories, with optional rewrite definitions following keyword 'defines'. * Command 'sublocale' accepts optional rewrite definitions after keyword 'defines'. * Command 'permanent_interpretation' has been discontinued. Use 'global_interpretation' or 'sublocale' instead. INCOMPATIBILITY. * Command 'print_definitions' prints dependencies of definitional specifications. This functionality used to be part of 'print_theory'. * Configuration option rule_insts_schematic has been discontinued (intermediate legacy feature in Isabelle2015). INCOMPATIBILITY. * Abbreviations in type classes now carry proper sort constraint.
Rare INCOMPATIBILITY in situations where the previous misbehaviour has been exploited. * Refinement of user-space type system in type classes: pseudo-local operations behave more similarly to abbreviations. Potential INCOMPATIBILITY in exotic situations. *** HOL *** * The 'typedef' command has been upgraded from a partially checked "axiomatization", to a full definitional specification that takes the global collection of overloaded constant / type definitions into account. Type definitions with open dependencies on overloaded definitions need to be specified as "typedef (overloaded)". This provides extra robustness in theory construction. Rare INCOMPATIBILITY. * Qualification of various formal entities in the libraries is done more uniformly via "context begin qualified definition ... end" instead of old-style "hide_const (open) ...". Consequently, both the defined constant and its defining fact become qualified, e.g. Option.is_none and Option.is_none_def. Occasional INCOMPATIBILITY in applications. * Some old and rarely used ASCII replacement syntax has been removed. INCOMPATIBILITY, standard syntax with symbols should be used instead. The subsequent commands help to reproduce the old forms, e.g. to simplify porting old theories: notation iff (infixr "<->" 25) notation Times (infixr "<*>" 80) type_notation Map.map (infixr "~=>" 0) notation Map.map_comp (infixl "o'_m" 55) type_notation FinFun.finfun ("(_ =>f /_)" [22, 21] 21) notation FuncSet.funcset (infixr "->" 60) notation FuncSet.extensional_funcset (infixr "->\<^sub>E" 60) notation Omega_Words_Fun.conc (infixr "conc" 65) notation Preorder.equiv ("op ~~") and Preorder.equiv ("(_/ ~~ _)" [51, 51] 50) notation (in topological_space) tendsto (infixr "--->" 55) notation (in topological_space) LIMSEQ ("((_)/ ----> (_))" [60, 60] 60) notation LIM ("((_)/ -- (_)/ --> (_))" [60, 0, 60] 60) notation NSA.approx (infixl "@=" 50) notation NSLIMSEQ ("((_)/ ----NS> (_))" [60, 60] 60) notation NSLIM ("((_)/ -- (_)/ --NS> (_))" [60, 0, 60] 60) * The alternative notation "\" for type and sort constraints has been removed: in LaTeX document output it looks the same as "::". INCOMPATIBILITY, use plain "::" instead. * Commands 'inductive' and 'inductive_set' work better when names for intro rules are omitted: the "cases" and "induct" rules no longer declare empty case_names, but no case_names at all. This allows to use numbered cases in proofs, without requiring method "goal_cases". * Inductive definitions ('inductive', 'coinductive', etc.) expose low-level facts of the internal construction only if the option "inductive_internals" is enabled. This refers to the internal predicate definition and its monotonicity result. Rare INCOMPATIBILITY. * Recursive function definitions ('fun', 'function', 'partial_function') expose low-level facts of the internal construction only if the option "function_internals" is enabled. Its internal inductive definition is also subject to "inductive_internals". Rare INCOMPATIBILITY. * BNF datatypes ('datatype', 'codatatype', etc.) expose low-level facts of the internal construction only if the option "bnf_internals" is enabled. This supersedes the former option "bnf_note_all". Rare INCOMPATIBILITY. * Combinator to represent case distinction on products is named "case_prod", uniformly, discontinuing any input aliases. Very popular theorem aliases have been retained.
Consolidated facts: PairE ~> prod.exhaust Pair_eq ~> prod.inject pair_collapse ~> prod.collapse Pair_fst_snd_eq ~> prod_eq_iff split_twice ~> prod.case_distrib split_weak_cong ~> prod.case_cong_weak split_split ~> prod.split split_split_asm ~> prod.split_asm splitI ~> case_prodI splitD ~> case_prodD splitI2 ~> case_prodI2 splitI2' ~> case_prodI2' splitE ~> case_prodE splitE' ~> case_prodE' split_pair ~> case_prod_Pair split_eta ~> case_prod_eta split_comp ~> case_prod_comp mem_splitI ~> mem_case_prodI mem_splitI2 ~> mem_case_prodI2 mem_splitE ~> mem_case_prodE The_split ~> The_case_prod cond_split_eta ~> cond_case_prod_eta Collect_split_in_rel_leE ~> Collect_case_prod_in_rel_leE Collect_split_in_rel_leI ~> Collect_case_prod_in_rel_leI in_rel_Collect_split_eq ~> in_rel_Collect_case_prod_eq Collect_split_Grp_eqD ~> Collect_case_prod_Grp_eqD Collect_split_Grp_inD ~> Collect_case_prod_Grp_in Domain_Collect_split ~> Domain_Collect_case_prod Image_Collect_split ~> Image_Collect_case_prod Range_Collect_split ~> Range_Collect_case_prod Eps_split ~> Eps_case_prod Eps_split_eq ~> Eps_case_prod_eq split_rsp ~> case_prod_rsp curry_split ~> curry_case_prod split_curry ~> case_prod_curry Changes in structure HOLogic: split_const ~> case_prod_const mk_split ~> mk_case_prod mk_psplits ~> mk_ptupleabs strip_psplits ~> strip_ptupleabs INCOMPATIBILITY. * The coercions to type 'real' have been reorganised. The function 'real' is no longer overloaded, but has type 'nat => real' and abbreviates of_nat for that type. Also 'real_of_int :: int => real' abbreviates of_int for that type. Other overloaded instances of 'real' have been replaced by 'real_of_ereal' and 'real_of_float'. Consolidated facts (among others): real_of_nat_le_iff -> of_nat_le_iff real_of_nat_numeral of_nat_numeral real_of_int_zero of_int_0 real_of_nat_zero of_nat_0 real_of_one of_int_1 real_of_int_add of_int_add real_of_nat_add of_nat_add real_of_int_diff of_int_diff real_of_nat_diff of_nat_diff floor_subtract floor_diff_of_int real_of_int_inject of_int_eq_iff real_of_int_gt_zero_cancel_iff of_int_0_less_iff real_of_int_ge_zero_cancel_iff of_int_0_le_iff real_of_nat_ge_zero of_nat_0_le_iff real_of_int_ceiling_ge le_of_int_ceiling ceiling_less_eq ceiling_less_iff ceiling_le_eq ceiling_le_iff less_floor_eq less_floor_iff floor_less_eq floor_less_iff floor_divide_eq_div floor_divide_of_int_eq real_of_int_zero_cancel of_nat_eq_0_iff ceiling_real_of_int ceiling_of_int INCOMPATIBILITY. * Theory Map: lemma map_of_is_SomeD was a clone of map_of_SomeD and has been removed. INCOMPATIBILITY. * Quickcheck setup for finite sets. * Discontinued simp_legacy_precond. Potential INCOMPATIBILITY. * Sledgehammer: - The MaSh relevance filter has been sped up. - Proof reconstruction has been improved, to minimize the incidence of cases where Sledgehammer gives a proof that does not work. - Auto Sledgehammer now minimizes and preplays the results. - Handle Vampire 4.0 proof output without raising exception. - Eliminated "MASH" environment variable. Use the "MaSh" option in Isabelle/jEdit instead. INCOMPATIBILITY. - Eliminated obsolete "blocking" option and related subcommands. * Nitpick: - Fixed soundness bug in translation of "finite" predicate. - Fixed soundness bug in "destroy_constrs" optimization. - Fixed soundness bug in translation of "rat" type. - Removed "check_potential" and "check_genuine" options. - Eliminated obsolete "blocking" option. 
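To illustrate the reorganised 'real' coercions above (sketch):

  lemma "real (2::nat) + real_of_int (3::int) = 5"
    by simp

Here "real" abbreviates of_nat and "real_of_int" abbreviates of_int, both at type real.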
* (Co)datatype package: - New commands "lift_bnf" and "copy_bnf" for lifting (copying) a BNF structure on the raw type to an abstract type defined using typedef. - Always generate "case_transfer" theorem. - For mutual types, generate slightly stronger "rel_induct", "rel_coinduct", and "coinduct" theorems. INCOMPATIBILITY. - Allow discriminators and selectors with the same name as the type being defined. - Avoid various internal name clashes (e.g., 'datatype f = f'). * Transfer: new methods for interactive debugging of 'transfer' and 'transfer_prover': 'transfer_start', 'transfer_step', 'transfer_end', 'transfer_prover_start' and 'transfer_prover_end'. * New diagnostic command print_record for displaying record definitions. * Division on integers is bootstrapped directly from division on naturals and uses generic numeral algorithm for computations. Slight INCOMPATIBILITY, simproc numeral_divmod replaces and generalizes former simprocs binary_int_div and binary_int_mod * Tightened specification of class semiring_no_zero_divisors. Minor INCOMPATIBILITY. * Class algebraic_semidom introduces common algebraic notions of integral (semi)domains, particularly units. Although logically subsumed by fields, it is not a super class of these in order not to burden fields with notions that are trivial there. * Class normalization_semidom specifies canonical representants for equivalence classes of associated elements in an integral (semi)domain. This formalizes associated elements as well. * Abstract specification of gcd/lcm operations in classes semiring_gcd, semiring_Gcd, semiring_Lcd. Minor INCOMPATIBILITY: facts gcd_nat.commute and gcd_int.commute are subsumed by gcd.commute, as well as gcd_nat.assoc and gcd_int.assoc by gcd.assoc. * Former constants Fields.divide (_ / _) and Divides.div (_ div _) are logically unified to Rings.divide in syntactic type class Rings.divide, with infix syntax (_ div _). Infix syntax (_ / _) for field division is added later as abbreviation in class Fields.inverse. INCOMPATIBILITY, instantiations must refer to Rings.divide rather than the former separate constants, hence infix syntax (_ / _) is usually not available during instantiation. * New cancellation simprocs for boolean algebras to cancel complementary terms for sup and inf. For example, "sup x (sup y (- x))" simplifies to "top". INCOMPATIBILITY. * Class uniform_space introduces uniform spaces between topological spaces and metric spaces. Minor INCOMPATIBILITY: open__def needs to be introduced in the form of a uniformity. Some constants are more general now, it may be necessary to add type class constraints. open_real_def \ open_dist open_complex_def \ open_dist * Library/Monad_Syntax: notation uses symbols \ and \. INCOMPATIBILITY. * Library/Multiset: - Renamed multiset inclusion operators: < ~> <# > ~> ># <= ~> <=# >= ~> >=# \ ~> \# \ ~> \# INCOMPATIBILITY. - Added multiset inclusion operator syntax: \# \# \# \# - "'a multiset" is no longer an instance of the "order", "ordered_ab_semigroup_add_imp_le", "ordered_cancel_comm_monoid_diff", "semilattice_inf", and "semilattice_sup" type classes. The theorems previously provided by these type classes (directly or indirectly) are now available through the "subset_mset" interpretation (e.g. add_mono ~> subset_mset.add_mono). INCOMPATIBILITY.
- Renamed conversions: multiset_of ~> mset multiset_of_set ~> mset_set set_of ~> set_mset INCOMPATIBILITY - Renamed lemmas: mset_le_def ~> subseteq_mset_def mset_less_def ~> subset_mset_def less_eq_multiset.rep_eq ~> subseteq_mset_def INCOMPATIBILITY - Removed lemmas generated by lift_definition: less_eq_multiset.abs_eq, less_eq_multiset.rsp, less_eq_multiset.transfer, less_eq_multiset_def INCOMPATIBILITY * Library/Omega_Words_Fun: Infinite words modeled as functions nat \ 'a. * Library/Bourbaki_Witt_Fixpoint: Added formalisation of the Bourbaki-Witt fixpoint theorem for increasing functions in chain-complete partial orders. * Library/Old_Recdef: discontinued obsolete 'defer_recdef' command. Minor INCOMPATIBILITY, use 'function' instead. * Library/Periodic_Fun: a locale that provides convenient lemmas for periodic functions. * Library/Formal_Power_Series: proper definition of division (with remainder) for formal power series; instances for Euclidean Ring and GCD. * HOL-Imperative_HOL: obsolete theory Legacy_Mrec has been removed. * HOL-Statespace: command 'statespace' uses mandatory qualifier for import of parent, as for general 'locale' expressions. INCOMPATIBILITY, remove '!' and add '?' as required. * HOL-Decision_Procs: The "approximation" method works with "powr" (exponentiation on real numbers) again. * HOL-Multivariate_Analysis: theory Cauchy_Integral_Thm with Contour integrals (= complex path integrals), Cauchy's integral theorem, winding numbers and Cauchy's integral formula, Liouville theorem, Fundamental Theorem of Algebra. Ported from HOL Light. * HOL-Multivariate_Analysis: topological concepts such as connected components, homotopic paths and the inside or outside of a set. * HOL-Multivariate_Analysis: radius of convergence of power series and various summability tests; Harmonic numbers and the Euler–Mascheroni constant; the Generalised Binomial Theorem; the complex and real Gamma/log-Gamma/Digamma/Polygamma functions and their most important properties. * HOL-Probability: The central limit theorem based on Levy's uniqueness and continuity theorems, weak convergence, and characteristic functions. * HOL-Data_Structures: new and growing session of standard data structures. *** ML *** * The following combinators for low-level profiling of the ML runtime system are available: profile_time (*CPU time*) profile_time_thread (*CPU time on this thread*) profile_allocations (*overall heap allocations*) * Antiquotation @{undefined} or \<^undefined> inlines (raise Match). * Antiquotation @{method NAME} inlines the (checked) name of the given Isar proof method. * Pretty printing of Poly/ML compiler output in Isabelle has been improved: proper treatment of break offsets and blocks with consistent breaks. * The auxiliary module Pure/display.ML has been eliminated. Its elementary thm print operations are now in Pure/more_thm.ML and thus called Thm.pretty_thm, Thm.string_of_thm etc. INCOMPATIBILITY. * Simproc programming interfaces have been simplified: Simplifier.make_simproc and Simplifier.define_simproc supersede various forms of Simplifier.mk_simproc, Simplifier.simproc_global etc. Note that term patterns for the left-hand sides are specified with implicitly fixed variables, like top-level theorem statements. INCOMPATIBILITY (see the sketch below).
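A minimal sketch of Simplifier.make_simproc (assuming the record fields lhss/proc as in the Isabelle/ML sources; this simproc never rewrites anything):

  ML \<open>
    val my_simproc =
      Simplifier.make_simproc @{context} "my_simproc"
       {lhss = [@{term "x + (0::nat)"}],
        proc = fn _ => fn _ => fn _ => NONE};  (* morphism, context, cterm *)
  \<close>

Note that the variable x in the left-hand side pattern is implicitly fixed, as in a top-level theorem statement.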
* Instantiation rules have been re-organized as follows:

    Thm.instantiate           (*low-level instantiation with named arguments*)
    Thm.instantiate'          (*version with positional arguments*)
    Drule.infer_instantiate   (*instantiation with type inference*)
    Drule.infer_instantiate'  (*version with positional arguments*)

  The LHS only requires variable specifications, instead of full terms. Old cterm_instantiate is superseded by infer_instantiate. INCOMPATIBILITY, need to re-adjust some ML names and types accordingly.

* Old tactic shorthands atac, rtac, etac, dtac, ftac have been discontinued. INCOMPATIBILITY, use regular assume_tac, resolve_tac etc. instead (with proper context).

* Thm.instantiate (and derivatives) no longer require the LHS of the instantiation to be certified: plain variables are given directly.

* Subgoal.SUBPROOF and Subgoal.FOCUS combinators use anonymous quasi-bound variables (like the Simplifier), instead of accidentally named local fixes. This has the potential to improve stability of proof tools, but can also cause INCOMPATIBILITY for tools that don't observe the proof context discipline.

* Isar proof methods are based on a slightly more general type context_tactic, which allows to change the proof context dynamically (e.g. to update cases) and indicate explicit Seq.Error results. Former METHOD_CASES is superseded by CONTEXT_METHOD; further combinators are provided in src/Pure/Isar/method.ML for convenience. INCOMPATIBILITY.

*** System ***

* Command-line tool "isabelle console" enables print mode "ASCII".

* Command-line tool "isabelle update_then" expands old Isar command conflations:

    hence  ~>  then have
    thus   ~>  then show

  This syntax is more orthogonal and improves readability and maintainability of proofs.

* Global session timeout is multiplied by timeout_scale factor. This allows to adjust large-scale tests (e.g. AFP) to overall hardware performance.

* Property values in etc/symbols may contain spaces, if written with the replacement character "␣" (Unicode point 0x2423). For example:

    \<star>  code: 0x0022c6  group: operator  font: Deja␣Vu␣Sans␣Mono

* Java runtime environment for x86_64-windows allows to use larger heap space.

* Java runtime options are determined separately for 32bit vs. 64bit platforms as follows.

  - Isabelle desktop application: platform-specific files that are associated with the main app bundle
  - isabelle jedit: settings JEDIT_JAVA_SYSTEM_OPTIONS, JEDIT_JAVA_OPTIONS32 vs. JEDIT_JAVA_OPTIONS64
  - isabelle build: settings ISABELLE_BUILD_JAVA_OPTIONS32 vs. ISABELLE_BUILD_JAVA_OPTIONS64

* Bash shell function "jvmpath" has been renamed to "platform_path": it is relevant both for Poly/ML and JVM processes.

* Poly/ML default platform architecture may be changed from 32bit to 64bit via system option ML_system_64. A system restart (and rebuild) is required after change.

* Poly/ML 5.6 runs natively on x86-windows and x86_64-windows, which both allow larger heap space than former x86-cygwin.

* Heap images are 10-15% smaller due to less wasteful persistent theory content (using ML type theory_id instead of theory).



New in Isabelle2015 (May 2015)
------------------------------

*** General ***

* Local theory specification commands may have a 'private' or 'qualified' modifier to restrict name space accesses to the local scope, as provided by some "context begin ... end" block. For example:

  context
  begin

  private definition ...
  private lemma ...

  qualified definition ...
  qualified lemma ...

  lemma ...
  theorem ...
  end

* Command 'experiment' opens an anonymous locale context with private naming policy.

* Command 'notepad' requires proper nesting of begin/end and its proof structure in the body: 'oops' is no longer supported here. Minor INCOMPATIBILITY, use 'sorry' instead.

* Command 'named_theorems' declares a dynamic fact within the context, together with an attribute to maintain the content incrementally. This supersedes functor Named_Thms in Isabelle/ML, but with a subtle change of semantics due to external visual order vs. internal reverse order.

* 'find_theorems': search patterns which are abstractions are schematically expanded before search. Search results match the naive expectation more closely, particularly wrt. abbreviations. INCOMPATIBILITY.

* Commands 'method_setup' and 'attribute_setup' now work within a local theory context.

* Outer syntax commands are managed authentically within the theory context, without implicit global state. Potential for accidental INCOMPATIBILITY, make sure that required theories are really imported.

* Historical command-line terminator ";" is no longer accepted (and already used differently in Isar). Minor INCOMPATIBILITY, use "isabelle update_semicolons" to remove obsolete semicolons from old theory sources.

* Structural composition of proof methods (meth1; meth2) in Isar corresponds to (tac1 THEN_ALL_NEW tac2) in ML.

* The Eisbach proof method language allows to define new proof methods by combining existing ones with their usual syntax. The "match" proof method provides basic fact/term matching in addition to premise/conclusion matching through Subgoal.focus, and binds fact names from matches as well as term patterns within matches. The Isabelle documentation provides an entry "eisbach" for the Eisbach User Manual. Sources and various examples are in ~~/src/HOL/Eisbach/.

*** Prover IDE -- Isabelle/Scala/jEdit ***

* Improved folding mode "isabelle" based on Isar syntax. Alternatively, the "sidekick" mode may be used for document structure.

* Extended bracket matching based on Isar language structure. System option jedit_structure_limit determines maximum number of lines to scan in the buffer.

* Support for BibTeX files: context menu, context-sensitive token marker, SideKick parser.

* Document antiquotation @{cite} provides formal markup, which is interpreted semi-formally based on .bib files that happen to be open in the editor (hyperlinks, completion etc.).

* Less waste of vertical space via negative line spacing (see Global Options / Text Area).

* Improved graphview panel with optional output of PNG or PDF, for display of 'thy_deps', 'class_deps' etc.

* The commands 'thy_deps' and 'class_deps' allow optional bounds to restrict the visualized hierarchy.

* Improved scheduling for asynchronous print commands (e.g. provers managed by the Sledgehammer panel) wrt. ongoing document processing.

*** Document preparation ***

* Document markup commands 'chapter', 'section', 'subsection', 'subsubsection', 'text', 'txt', 'text_raw' work uniformly in any context, even before the initial 'theory' command. Obsolete proof commands 'sect', 'subsect', 'subsubsect', 'txt_raw' have been discontinued, use 'section', 'subsection', 'subsubsection', 'text_raw' instead. The old 'header' command is still retained for some time, but should be replaced by 'chapter', 'section' etc. (using "isabelle update_header"). Minor INCOMPATIBILITY.

* Official support for "tt" style variants, via \isatt{...} or \begin{isabellett}...\end{isabellett}.
  The somewhat fragile \verb or verbatim environment of LaTeX is no longer used. This allows @{ML} etc. as argument to other macros (such as footnotes).

* Document antiquotation @{verbatim} prints ASCII text literally in "tt" style.

* Discontinued obsolete option "document_graph": session_graph.pdf is produced unconditionally for HTML browser_info and PDF-LaTeX document.

* Diagnostic commands and document markup commands within a proof do not affect the command tag for output. Thus commands like 'thm' are subject to proof document structure, and no longer "stick out" accidentally. Commands 'text' and 'txt' merely differ in the LaTeX style, not their tags. Potential INCOMPATIBILITY in exotic situations.

* System option "pretty_margin" is superseded by "thy_output_margin", which is also accessible via document antiquotation option "margin". Only the margin for document output may be changed, but not the global pretty printing: that is 76 for plain console output, and adapted dynamically in GUI front-ends. Implementations of document antiquotations need to observe the margin explicitly according to Thy_Output.string_of_margin. Minor INCOMPATIBILITY.

* Specification of 'document_files' in the session ROOT file is mandatory for document preparation. The legacy mode with implicit copying of the document/ directory is no longer supported. Minor INCOMPATIBILITY.

*** Pure ***

* Proof methods with explicit instantiation ("rule_tac", "subgoal_tac" etc.) allow an optional context of local variables ('for' declaration): these variables become schematic in the instantiated theorem; this behaviour is analogous to 'for' in attributes "where" and "of". Configuration option rule_insts_schematic (default false) controls use of schematic variables outside the context. Minor INCOMPATIBILITY, declare rule_insts_schematic = true temporarily and update to use local variable declarations or dummy patterns instead.

* Explicit instantiation via attributes "where", "of", and proof methods "rule_tac" with derivatives like "subgoal_tac" etc. admit dummy patterns ("_") that stand for anonymous local variables.

* Generated schematic variables in standard format of exported facts are incremented to avoid material in the proof context. Rare INCOMPATIBILITY, explicit instantiation sometimes needs to refer to different index.

* Lexical separation of signed and unsigned numerals: categories "num" and "float" are unsigned. INCOMPATIBILITY: subtle change in precedence of numeral signs, particularly in expressions involving infix syntax like "(- 1) ^ n".

* Old inner token category "xnum" has been discontinued. Potential INCOMPATIBILITY for exotic syntax: may use mixfix grammar with "num" token category instead.

*** HOL ***

* New (co)datatype package:
  - The 'datatype_new' command has been renamed 'datatype'. The old command of that name is now called 'old_datatype' and is provided by "~~/src/HOL/Library/Old_Datatype.thy". See 'isabelle doc datatypes' for information on porting. INCOMPATIBILITY.
  - Renamed theorems:
      disc_corec ~> corec_disc
      disc_corec_iff ~> corec_disc_iff
      disc_exclude ~> distinct_disc
      disc_exhaust ~> exhaust_disc
      disc_map_iff ~> map_disc_iff
      sel_corec ~> corec_sel
      sel_exhaust ~> exhaust_sel
      sel_map ~> map_sel
      sel_set ~> set_sel
      sel_split ~> split_sel
      sel_split_asm ~> split_sel_asm
      strong_coinduct ~> coinduct_strong
      weak_case_cong ~> case_cong_weak
    INCOMPATIBILITY.
  - The "no_code" option to "free_constructors", "datatype_new", and "codatatype" has been renamed "plugins del: code". INCOMPATIBILITY.
- The rules "set_empty" have been removed. They are easy consequences of other set rules "by auto". INCOMPATIBILITY. - The rule "set_cases" is now registered with the "[cases set]" attribute. This can influence the behavior of the "cases" proof method when more than one case rule is applicable (e.g., an assumption is of the form "w : set ws" and the method "cases w" is invoked). The solution is to specify the case rule explicitly (e.g. "cases w rule: widget.exhaust"). INCOMPATIBILITY. - Renamed theories: BNF_Comp ~> BNF_Composition BNF_FP_Base ~> BNF_Fixpoint_Base BNF_GFP ~> BNF_Greatest_Fixpoint BNF_LFP ~> BNF_Least_Fixpoint BNF_Constructions_on_Wellorders ~> BNF_Wellorder_Constructions Cardinals/Constructions_on_Wellorders ~> Cardinals/Wellorder_Constructions INCOMPATIBILITY. - Lifting and Transfer setup for basic HOL types sum and prod (also option) is now performed by the BNF package. Theories Lifting_Sum, Lifting_Product and Lifting_Option from Main became obsolete and were removed. Changed definitions of the relators rel_prod and rel_sum (using inductive). INCOMPATIBILITY: use rel_prod.simps and rel_sum.simps instead of rel_prod_def and rel_sum_def. Minor INCOMPATIBILITY: (rarely used by name) transfer theorem names changed (e.g. map_prod_transfer ~> prod.map_transfer). - Parametricity theorems for map functions, relators, set functions, constructors, case combinators, discriminators, selectors and (co)recursors are automatically proved and registered as transfer rules. * Old datatype package: - The old 'datatype' command has been renamed 'old_datatype', and 'rep_datatype' has been renamed 'old_rep_datatype'. They are provided by "~~/src/HOL/Library/Old_Datatype.thy". See 'isabelle doc datatypes' for information on porting. INCOMPATIBILITY. - Renamed theorems: weak_case_cong ~> case_cong_weak INCOMPATIBILITY. - Renamed theory: ~~/src/HOL/Datatype.thy ~> ~~/src/HOL/Library/Old_Datatype.thy INCOMPATIBILITY. * Nitpick: - Fixed soundness bug related to the strict and non-strict subset operations. * Sledgehammer: - CVC4 is now included with Isabelle instead of CVC3 and run by default. - Z3 is now always enabled by default, now that it is fully open source. The "z3_non_commercial" option is discontinued. - Minimization is now always enabled by default. Removed sub-command: min - Proof reconstruction, both one-liners and Isar, has been dramatically improved. - Improved support for CVC4 and veriT. * Old and new SMT modules: - The old 'smt' method has been renamed 'old_smt' and moved to 'src/HOL/Library/Old_SMT.thy'. It is provided for compatibility, until applications have been ported to use the new 'smt' method. For the method to work, an older version of Z3 (e.g. Z3 3.2 or 4.0) must be installed, and the environment variable "OLD_Z3_SOLVER" must point to it. INCOMPATIBILITY. - The 'smt2' method has been renamed 'smt'. INCOMPATIBILITY. - New option 'smt_reconstruction_step_timeout' to limit the reconstruction time of Z3 proof steps in the new 'smt' method. - New option 'smt_statistics' to display statistics of the new 'smt' method, especially runtime statistics of Z3 proof reconstruction. * Lifting: command 'lift_definition' allows to execute lifted constants that have as a return type a datatype containing a subtype. This overcomes long-time limitations in the area of code generation and lifting, and avoids tedious workarounds. * Command and antiquotation "value" provide different evaluation slots (again), where the previous strategy (NBE after ML) serves as default. Minor INCOMPATIBILITY. 
* Add NO_MATCH-simproc, which allows to check for syntactic non-equality.

* field_simps: Use NO_MATCH-simproc for distribution rules, to avoid non-termination in case of distributing a division. With this change field_simps is in some cases slightly less powerful; if it fails, try to add algebra_simps, or use divide_simps. Minor INCOMPATIBILITY.

* Separate class no_zero_divisors has been given up in favour of fully algebraic semiring_no_zero_divisors. INCOMPATIBILITY.

* Class linordered_semidom really requires no zero divisors. INCOMPATIBILITY.

* Classes division_ring, field and linordered_field always demand "inverse 0 = 0". Given up separate classes division_ring_inverse_zero, field_inverse_zero and linordered_field_inverse_zero. INCOMPATIBILITY.

* Classes cancel_ab_semigroup_add / cancel_monoid_add specify explicit additive inverse operation. INCOMPATIBILITY.

* Complex powers and square roots. The functions "ln" and "powr" are now overloaded for types real and complex, and 0 powr y = 0 by definition. INCOMPATIBILITY: type constraints may be necessary.

* The functions "sin" and "cos" are now defined for any type of sort "{real_normed_algebra_1,banach}", so in particular on "real" and "complex" uniformly. Minor INCOMPATIBILITY: type constraints may be needed.

* New library of properties of the complex transcendental functions sin, cos, tan, exp, Ln, Arctan, Arcsin, Arccos. Ported from HOL Light.

* The factorial function, "fact", now has type "nat => 'a" (of a sort that admits numeric types including nat, int, real and complex). INCOMPATIBILITY: an expression such as "fact 3 = 6" may require a type constraint, and the combination "real (fact k)" is likely to be unsatisfactory. If a type conversion is still necessary, then use "of_nat (fact k)" or "real_of_nat (fact k)".

* Removed functions "natfloor" and "natceiling", use "nat o floor" and "nat o ceiling" instead. A few of the lemmas have been retained and adapted: in their names "natfloor"/"natceiling" has been replaced by "nat_floor"/"nat_ceiling".

* Qualified some duplicated fact names required for bootstrapping the type class hierarchy:
    ab_add_uminus_conv_diff ~> diff_conv_add_uminus
    field_inverse_zero ~> inverse_zero
    field_divide_inverse ~> divide_inverse
    field_inverse ~> left_inverse
  Minor INCOMPATIBILITY.

* Eliminated fact duplicates:
    mult_less_imp_less_right ~> mult_right_less_imp_less
    mult_less_imp_less_left ~> mult_left_less_imp_less
  Minor INCOMPATIBILITY.

* Fact consolidation: even_less_0_iff is subsumed by double_add_less_zero_iff_single_add_less_zero (simp by default anyway).

* Generalized and consolidated some theorems concerning divisibility:
    dvd_reduce ~> dvd_add_triv_right_iff
    dvd_plus_eq_right ~> dvd_add_right_iff
    dvd_plus_eq_left ~> dvd_add_left_iff
  Minor INCOMPATIBILITY.

* "even" and "odd" are mere abbreviations for "2 dvd _" and "~ 2 dvd _" and part of theory Main.
    even_def ~> even_iff_mod_2_eq_zero
  INCOMPATIBILITY.

* Lemma name consolidation: divide_Numeral1 ~> divide_numeral_1. Minor INCOMPATIBILITY.

* Bootstrap of listsum as special case of abstract product over lists. Fact rename: listsum_def ~> listsum.eq_foldr INCOMPATIBILITY.

* Product over lists via constant "listprod".

* Theory List: renamed drop_Suc_conv_tl and nth_drop' to Cons_nth_drop_Suc.

* New infrastructure for compiling, running, evaluating and testing generated code in target languages in HOL/Library/Code_Test. See HOL/Codegenerator_Test/Code_Test* for examples.

* Library/Multiset:
  - Introduced "replicate_mset" operation.
  - Introduced alternative characterizations of the multiset ordering in "Library/Multiset_Order".
  - Renamed multiset ordering:
      <# ~> #<#
      <=# ~> #<=#
      \<subset># ~> #\<subset>#
      \<subseteq># ~> #\<subseteq>#
    INCOMPATIBILITY.
  - Introduced abbreviations for ill-named multiset operations:
      <#, \<subset># abbreviate < (strict subset)
      <=#, \<le>#, \<subseteq># abbreviate <= (subset or equal)
    INCOMPATIBILITY.
  - Renamed:
      in_multiset_of ~> in_multiset_in_set
      Multiset.fold ~> fold_mset
      Multiset.filter ~> filter_mset
    INCOMPATIBILITY.
  - Removed mcard, which is equal to size.
  - Added attributes:
      image_mset.id [simp]
      image_mset_id [simp]
      elem_multiset_of_set [simp, intro]
      comp_fun_commute_plus_mset [simp]
      comp_fun_commute.fold_mset_insert [OF comp_fun_commute_plus_mset, simp]
      in_mset_fold_plus_iff [iff]
      set_of_Union_mset [simp]
      in_Union_mset_iff [iff]
    INCOMPATIBILITY.

* Library/Sum_of_Squares: simplified and improved "sos" method. Always use local CSDP executable, which is much faster than the NEOS server. The "sos_cert" functionality is invoked as "sos" with additional argument. Minor INCOMPATIBILITY.

* HOL-Decision_Procs: New counterexample generator quickcheck [approximation] for inequalities of transcendental functions. Uses hardware floating point arithmetic to randomly discover potential counterexamples. Counterexamples are certified with the "approximation" method. See HOL/Decision_Procs/ex/Approximation_Quickcheck_Ex.thy for examples.

* HOL-Probability: Reworked measurability prover:
  - applies destructor rules repeatedly
  - removed application splitting (replaced by destructor rule)
  - added congruence rules to rewrite measure spaces under the sets projection

* New proof method "rewrite" (in theory ~~/src/HOL/Library/Rewrite) for single-step rewriting with subterm selection based on patterns.

*** ML ***

* Subtle change of name space policy: undeclared entries are now considered inaccessible, instead of accessible via the fully-qualified internal name. This mainly affects Name_Space.intern (and derivatives), which may produce an unexpected Long_Name.hidden prefix. Note that contemporary applications use the strict Name_Space.check (and derivatives) instead, which is not affected by the change. Potential INCOMPATIBILITY in rare applications of Name_Space.intern.

* Subtle change of error semantics of Toplevel.proof_of: regular user ERROR instead of internal Toplevel.UNDEF.

* Basic combinators map, fold, fold_map, split_list, apply are available as parameterized antiquotations, e.g. @{map 4} for lists of quadruples.

* Renamed "pairself" to "apply2", in accordance to @{apply 2}. INCOMPATIBILITY.

* Former combinators NAMED_CRITICAL and CRITICAL for central critical sections have been discontinued, in favour of the more elementary Multithreading.synchronized and its high-level derivative Synchronized.var (which is usually sufficient in applications). Subtle INCOMPATIBILITY: synchronized access needs to be atomic and cannot be nested.

* Synchronized.value (ML) is actually synchronized (as in Scala): subtle change of semantics with minimal potential for INCOMPATIBILITY.

* The main operations to certify logical entities are Thm.ctyp_of and Thm.cterm_of with a local context; old-style global theory variants are available as Thm.global_ctyp_of and Thm.global_cterm_of. INCOMPATIBILITY.

* Elementary operations in module Thm are no longer pervasive. INCOMPATIBILITY, need to use qualified Thm.prop_of, Thm.cterm_of, Thm.term_of etc.
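For example, a minimal sketch of context-based certification (term and type are arbitrary):

  ML {*
    val ct = Thm.cterm_of @{context} @{term "x + y :: nat"};  (*certified term*)
    val cT = Thm.ctyp_of @{context} @{typ "nat => bool"};     (*certified type*)
  *}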
* Proper context for various elementary tactics: assume_tac, resolve_tac, eresolve_tac, dresolve_tac, forward_tac, match_tac, compose_tac, Splitter.split_tac etc. INCOMPATIBILITY.

* Tactical PARALLEL_ALLGOALS is the most common way to refer to PARALLEL_GOALS.

* Goal.prove_multi is superseded by the fully general Goal.prove_common, which also allows to specify a fork priority.

* Antiquotation @{command_spec "COMMAND"} is superseded by @{command_keyword COMMAND} (usually without quotes and with PIDE markup). Minor INCOMPATIBILITY.

* Cartouches within ML sources are turned into values of type Input.source (with formal position information).

*** System ***

* The Isabelle tool "update_cartouches" changes theory files to use cartouches instead of old-style {* verbatim *} or `alt_string` tokens.

* The Isabelle tool "build" provides new options -X, -k, -x.

* Discontinued old-fashioned "codegen" tool. Code generation can always be externally triggered using an appropriate ROOT file plus a corresponding theory. Parametrization is possible using environment variables, or ML snippets in the most extreme cases. Minor INCOMPATIBILITY.

* JVM system property "isabelle.threads" determines size of Scala thread pool, like Isabelle system option "threads" for ML.

* JVM system property "isabelle.laf" determines the default Swing look-and-feel, via internal class name or symbolic name as in the jEdit menu Global Options / Appearance.

* Support for Proof General and Isar TTY loop has been discontinued. Minor INCOMPATIBILITY, use standard PIDE infrastructure instead.



New in Isabelle2014 (August 2014)
---------------------------------

*** General ***

* Support for official Standard ML within the Isabelle context. Command 'SML_file' reads and evaluates the given Standard ML file. Toplevel bindings are stored within the theory context; the initial environment is restricted to the Standard ML implementation of Poly/ML, without the add-ons of Isabelle/ML. Commands 'SML_import' and 'SML_export' allow to exchange toplevel bindings between the two separate environments. See also ~~/src/Tools/SML/Examples.thy for some examples.

* Standard tactics and proof methods such as "clarsimp", "auto" and "safe" now preserve equality hypotheses "x = expr" where x is a free variable. Locale assumptions and chained facts containing "x" continue to be useful. The new method "hypsubst_thin" and the configuration option "hypsubst_thin" (within the attribute name space) restore the previous behavior. INCOMPATIBILITY, especially where induction is done after these methods or when the names of free and bound variables clash. As first approximation, old proofs may be repaired by "using [[hypsubst_thin = true]]" in the critical spot.

* More static checking of proof methods, which allows the system to form a closure over the concrete syntax. Method arguments should be processed in the original proof context as far as possible, before operating on the goal state. In any case, the standard discipline for subgoal-addressing needs to be observed: no subgoals or a subgoal number that is out of range produces an empty result sequence, not an exception. Potential INCOMPATIBILITY for non-conformant tactical proof tools.

* Lexical syntax (inner and outer) supports text cartouches with arbitrary nesting, and without escapes of quotes etc. The Prover IDE supports input via ` (backquote).
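For example, a minimal sketch of nested cartouche input (the text itself is arbitrary):

  text \<open>Cartouches may be nested without escapes: \<open>inner \<open>deepest\<close> material\<close>.\<close>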
* The outer syntax categories "text" (for formal comments and document markup commands) and "altstring" (for literal fact references) allow cartouches as well, in addition to the traditional mix of quotations.

* Syntax of document antiquotation @{rail} now uses \<newline> instead of "\\", to avoid the optical illusion of escaped backslash within string token. General renovation of its syntax using text cartouches. Minor INCOMPATIBILITY.

* Discontinued legacy_isub_isup, which was a temporary workaround for Isabelle/ML in Isabelle2013-1. The prover process no longer accepts old identifier syntax with \<^isub> or \<^isup>. Potential INCOMPATIBILITY.

* Document antiquotation @{url} produces markup for the given URL, which results in an active hyperlink within the text.

* Document antiquotation @{file_unchecked} is like @{file}, but does not check existence within the file-system.

* Updated and extended manuals: codegen, datatypes, implementation, isar-ref, jedit, system.

*** Prover IDE -- Isabelle/Scala/jEdit ***

* Improved Document panel: simplified interaction where every single mouse click (re)opens document via desktop environment or as jEdit buffer.

* Support for Navigator plugin (with toolbar buttons), with connection to PIDE hyperlinks.

* Auxiliary files ('ML_file' etc.) are managed by the Prover IDE. Open text buffers take precedence over copies within the file-system.

* Improved support for Isabelle/ML, with jEdit mode "isabelle-ml" for auxiliary ML files.

* Improved syntactic and semantic completion mechanism, with simple templates, completion language context, name-space completion, file-name completion, spell-checker completion.

* Refined GUI popup for completion: more robust key/mouse event handling and propagation to enclosing text area -- avoid losing keystrokes with slow / remote graphics displays.

* Completion popup supports both ENTER and TAB (default) to select an item, depending on Isabelle options.

* Refined insertion of completion items wrt. jEdit text: multiple selections, rectangular selections, rectangular selection as "tall caret".

* Integrated spell-checker for document text, comments etc. with completion popup and context-menu.

* More general "Query" panel supersedes "Find" panel, with GUI access to commands 'find_theorems' and 'find_consts', as well as print operations for the context. Minor incompatibility in keyboard shortcuts etc.: replace action isabelle-find by isabelle-query.

* Search field for all output panels ("Output", "Query", "Info" etc.) to highlight text via regular expression.

* Option "jedit_print_mode" (see also "Plugin Options / Isabelle / General") allows to specify additional print modes for the prover process, without requiring old-fashioned command-line invocation of "isabelle jedit -m MODE".

* More support for remote files (e.g. http) using standard Java networking operations instead of jEdit virtual file-systems.

* Empty editor buffers that are no longer required (e.g. via theory imports) are automatically removed from the document model.

* Improved monitor panel.

* Improved Console/Scala plugin: more uniform scala.Console output, more robust treatment of threads and interrupts.

* Improved management of dockable windows: clarified keyboard focus and window placement wrt. main editor view; optional menu item to "Detach" a copy where this makes sense.

* New Simplifier Trace panel provides an interactive view of the simplification process, enabled by the "simp_trace_new" attribute within the context.
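For example, a minimal sketch of enabling the new trace for a single simp invocation (the lemma is arbitrary):

  lemma "rev (rev xs) = xs"
    using [[simp_trace_new mode=full]]
    by simp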
*** Pure ***

* Low-level type-class commands 'classes', 'classrel', 'arities' have been discontinued to avoid the danger of non-trivial axiomatization that is not immediately visible. INCOMPATIBILITY, use regular 'instance' command with proof. The required OFCLASS(...) theorem might be postulated via 'axiomatization' beforehand, or the proof finished trivially if the underlying class definition is made vacuous (without any assumptions). See also Isabelle/ML operations Axclass.class_axiomatization, Axclass.classrel_axiomatization, Axclass.arity_axiomatization.

* Basic constants of Pure use more conventional names and are always qualified. Rare INCOMPATIBILITY, but with potentially serious consequences, notably for tools in Isabelle/ML. The following renaming needs to be applied:

    ==             ~> Pure.eq
    ==>            ~> Pure.imp
    all            ~> Pure.all
    TYPE           ~> Pure.type
    dummy_pattern  ~> Pure.dummy_pattern

  Systematic porting works by using the following theory setup on a *previous* Isabelle version to introduce the new name accesses for the old constants:

  setup {*
    fn thy => thy
      |> Sign.root_path
      |> Sign.const_alias (Binding.qualify true "Pure" @{binding eq}) "=="
      |> Sign.const_alias (Binding.qualify true "Pure" @{binding imp}) "==>"
      |> Sign.const_alias (Binding.qualify true "Pure" @{binding all}) "all"
      |> Sign.restore_naming thy
  *}

  Thus ML antiquotations like @{const_name Pure.eq} may be used already. Later the application is moved to the current Isabelle version, and the auxiliary aliases are deleted.

* Attributes "where" and "of" allow an optional context of local variables ('for' declaration): these variables become schematic in the instantiated theorem.

* Obsolete attribute "standard" has been discontinued (legacy since Isabelle2012). Potential INCOMPATIBILITY, use explicit 'for' context where instantiations with schematic variables are intended (for declaration commands like 'lemmas' or attributes like "of"). The following temporary definition may help to port old applications:

  attribute_setup standard =
    "Scan.succeed (Thm.rule_attribute (K Drule.export_without_context))"

* More thorough check of proof context for goal statements and attributed fact expressions (concerning background theory, declared hyps). Potential INCOMPATIBILITY, tools need to observe standard context discipline. See also Assumption.add_assumes and the more primitive Thm.assume_hyps.

* Inner syntax token language allows regular quoted strings "..." (only makes sense in practice, if outer syntax is delimited differently, e.g. via cartouches).

* Command 'print_term_bindings' supersedes 'print_binds' for clarity, but the latter is retained some time as Proof General legacy.

* Code generator preprocessor: explicit control of simp tracing on a per-constant basis. See attribute "code_preproc".

*** HOL ***

* Code generator: enforce case of identifiers only for strict target language requirements. INCOMPATIBILITY.

* Code generator: explicit proof contexts in many ML interfaces. INCOMPATIBILITY.

* Code generator: minimize exported identifiers by default. Minor INCOMPATIBILITY.

* Code generation for SML and OCaml: dropped arcane "no_signatures" option. Minor INCOMPATIBILITY.

* "declare [[code abort: ...]]" replaces "code_abort ...". INCOMPATIBILITY.

* "declare [[code drop: ...]]" drops all code equations associated with the given constants.

* Code generation is provided for the make, fields, extend and truncate operations on records.

* Command and antiquotation "value" are now hardcoded against nbe and ML. Minor INCOMPATIBILITY.
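For example, a minimal sketch of the new "code abort" declaration form, with a hypothetical constant:

  definition stub :: nat
    where "stub = 0"

  declare [[code abort: stub]]

Generated code then raises an exception whenever stub is actually evaluated.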
* Renamed command 'enriched_type' to 'functor'. INCOMPATIBILITY.

* The symbol "\<newline>" may be used within char or string literals to represent (Char Nibble0 NibbleA), i.e. ASCII newline.

* Qualified String.implode and String.explode. INCOMPATIBILITY.

* Simplifier: Enhanced solver of preconditions of rewrite rules can now deal with conjunctions. For help with converting proofs, the old behaviour of the simplifier can be restored like this: declare/using [[simp_legacy_precond]]. This configuration option will disappear again in the future. INCOMPATIBILITY.

* Simproc "finite_Collect" is no longer enabled by default, due to spurious crashes and other surprises. Potential INCOMPATIBILITY.

* Moved new (co)datatype package and its dependencies from session "HOL-BNF" to "HOL". The commands 'bnf', 'wrap_free_constructors', 'datatype_new', 'codatatype', 'primcorec', 'primcorecursive' are now part of theory "Main".

  Theory renamings:
    FunDef.thy ~> Fun_Def.thy (and Fun_Def_Base.thy)
    Library/Wfrec.thy ~> Wfrec.thy
    Library/Zorn.thy ~> Zorn.thy
    Cardinals/Order_Relation.thy ~> Order_Relation.thy
    Library/Order_Union.thy ~> Cardinals/Order_Union.thy
    Cardinals/Cardinal_Arithmetic_Base.thy ~> BNF_Cardinal_Arithmetic.thy
    Cardinals/Cardinal_Order_Relation_Base.thy ~> BNF_Cardinal_Order_Relation.thy
    Cardinals/Constructions_on_Wellorders_Base.thy ~> BNF_Constructions_on_Wellorders.thy
    Cardinals/Wellorder_Embedding_Base.thy ~> BNF_Wellorder_Embedding.thy
    Cardinals/Wellorder_Relation_Base.thy ~> BNF_Wellorder_Relation.thy
    BNF/Ctr_Sugar.thy ~> Ctr_Sugar.thy
    BNF/Basic_BNFs.thy ~> Basic_BNFs.thy
    BNF/BNF_Comp.thy ~> BNF_Comp.thy
    BNF/BNF_Def.thy ~> BNF_Def.thy
    BNF/BNF_FP_Base.thy ~> BNF_FP_Base.thy
    BNF/BNF_GFP.thy ~> BNF_GFP.thy
    BNF/BNF_LFP.thy ~> BNF_LFP.thy
    BNF/BNF_Util.thy ~> BNF_Util.thy
    BNF/Coinduction.thy ~> Coinduction.thy
    BNF/More_BNFs.thy ~> Library/More_BNFs.thy
    BNF/Countable_Type.thy ~> Library/Countable_Set_Type.thy
    BNF/Examples/* ~> BNF_Examples/*

  New theories:
    Wellorder_Extension.thy (split from Zorn.thy)
    Library/Cardinal_Notations.thy
    Library/BNF_Axiomatization.thy
    BNF_Examples/Misc_Primcorec.thy
    BNF_Examples/Stream_Processor.thy

  Discontinued theories:
    BNF/BNF.thy
    BNF/Equiv_Relations_More.thy

  INCOMPATIBILITY.

* New (co)datatype package:
  - Command 'primcorec' is fully implemented.
  - Command 'datatype_new' generates size functions ("size_xxx" and "size") as required by 'fun'.
  - BNFs are integrated with the Lifting tool and new-style (co)datatypes with Transfer.
  - Renamed commands:
      datatype_new_compat ~> datatype_compat
      primrec_new ~> primrec
      wrap_free_constructors ~> free_constructors
    INCOMPATIBILITY.
  - The generated constants "xxx_case" and "xxx_rec" have been renamed "case_xxx" and "rec_xxx" (e.g., "prod_case" ~> "case_prod"). INCOMPATIBILITY.
  - The constant "xxx_(un)fold" and related theorems are no longer generated. Use "xxx_(co)rec" or define "xxx_(un)fold" manually using "prim(co)rec". INCOMPATIBILITY.
  - No discriminators are generated for nullary constructors by default, eliminating the need for the odd "=:" syntax. INCOMPATIBILITY.
  - No discriminators or selectors are generated by default by "datatype_new", unless custom names are specified or the new "discs_sels" option is passed. INCOMPATIBILITY.

* Old datatype package:
  - The generated theorems "xxx.cases" and "xxx.recs" have been renamed "xxx.case" and "xxx.rec" (e.g., "sum.cases" -> "sum.case"). INCOMPATIBILITY.
  - The generated constants "xxx_case", "xxx_rec", and "xxx_size" have been renamed "case_xxx", "rec_xxx", and "size_xxx" (e.g., "prod_case" ~> "case_prod"). INCOMPATIBILITY.

* The types "'a list" and "'a option", their set and map functions, their relators, and their selectors are now produced using the new BNF-based datatype package.

  Renamed constants:
    Option.set ~> set_option
    Option.map ~> map_option
    option_rel ~> rel_option

  Renamed theorems:
    set_def ~> set_rec[abs_def]
    map_def ~> map_rec[abs_def]
    Option.map_def ~> map_option_case[abs_def] (with "case_option" instead of "rec_option")
    option.recs ~> option.rec
    list_all2_def ~> list_all2_iff
    set.simps ~> set_simps (or the slightly different "list.set")
    map.simps ~> list.map
    hd.simps ~> list.sel(1)
    tl.simps ~> list.sel(2-3)
    the.simps ~> option.sel

  INCOMPATIBILITY.

* The following map functions and relators have been renamed:
    sum_map ~> map_sum
    map_pair ~> map_prod
    prod_rel ~> rel_prod
    sum_rel ~> rel_sum
    fun_rel ~> rel_fun
    set_rel ~> rel_set
    filter_rel ~> rel_filter
    fset_rel ~> rel_fset (in "src/HOL/Library/FSet.thy")
    cset_rel ~> rel_cset (in "src/HOL/Library/Countable_Set_Type.thy")
    vset ~> rel_vset (in "src/HOL/Library/Quotient_Set.thy")
  INCOMPATIBILITY.

* Lifting and Transfer:
  - a type variable as a raw type is supported
  - stronger reflexivity prover
  - rep_eq is always generated by lift_definition
  - setup for Lifting/Transfer is now automated for BNFs
    + holds for BNFs that do not contain a dead variable
    + relator_eq, relator_mono, relator_distr, relator_domain, relator_eq_onp, quot_map, transfer rules for bi_unique, bi_total, right_unique, right_total, left_unique, left_total are proved automatically
    + definition of a predicator is generated automatically
    + simplification rules for a predicator definition are proved automatically for datatypes
  - consolidation of the setup of Lifting/Transfer
    + property that a relator preserves reflexivity is not needed any more. Minor INCOMPATIBILITY.
    + left_total and left_unique rules are now transfer rules (reflexivity_rule attribute not needed anymore). INCOMPATIBILITY.
    + Domainp does not have to be a separate assumption in relator_domain theorems (=> more natural statement). INCOMPATIBILITY.
  - registration of code equations is more robust. Potential INCOMPATIBILITY.
  - respectfulness proof obligation is preprocessed to a more readable form. Potential INCOMPATIBILITY.
  - eq_onp is always unfolded in respectfulness proof obligation. Potential INCOMPATIBILITY.
  - unregister lifting setup for Code_Numeral.integer and Code_Numeral.natural. Potential INCOMPATIBILITY.
  - Lifting.invariant -> eq_onp INCOMPATIBILITY.

* New internal SAT solver "cdclite" that produces models and proof traces. This solver replaces the internal SAT solvers "enumerate" and "dpll". Applications that explicitly used one of these two SAT solvers should use "cdclite" instead. In addition, "cdclite" is now the default SAT solver for the "sat" and "satx" proof methods and corresponding tactics; the old default can be restored using "declare [[sat_solver = zchaff_with_proofs]]". Minor INCOMPATIBILITY.

* SMT module: A new version of the SMT module, temporarily called "SMT2", uses SMT-LIB 2 and supports recent versions of Z3 (e.g., 4.3). The new proof method is called "smt2". CVC3 and CVC4 are also supported as oracles. Yices is no longer supported, because no version of the solver can handle both SMT-LIB 2 and quantifiers.
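For example, a minimal sketch of the new method (assuming Z3 is installed and activated; the lemma is arbitrary):

  lemma "(x::int) + y = y + x"
    by smt2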
* Activation of Z3 now works via "z3_non_commercial" system option (without requiring restart), instead of former settings variable "Z3_NON_COMMERCIAL". The option can be edited in Isabelle/jEdit menu Plugin Options / Isabelle / General.

* Sledgehammer:
  - Z3 can now produce Isar proofs.
  - MaSh overhaul:
    . New SML-based learning algorithms eliminate the dependency on Python and increase performance and reliability.
    . MaSh and MeSh are now used by default together with the traditional MePo (Meng-Paulson) relevance filter. To disable MaSh, set the "MaSh" system option in Isabelle/jEdit Plugin Options / Isabelle / General to "none".
  - New option: smt_proofs
  - Renamed options:
      isar_compress ~> compress
      isar_try0 ~> try0
    INCOMPATIBILITY.

* Removed solvers remote_cvc3 and remote_z3. Use cvc3 and z3 instead.

* Nitpick:
  - Fixed soundness bug whereby mutually recursive datatypes could take infinite values.
  - Fixed soundness bug with low-level number functions such as "Abs_Integ" and "Rep_Integ".
  - Removed "std" option.
  - Renamed "show_datatypes" to "show_types" and "hide_datatypes" to "hide_types".

* Metis: Removed legacy proof method 'metisFT'. Use 'metis (full_types)' instead. INCOMPATIBILITY.

* Try0: Added 'algebra' and 'meson' to the set of proof methods.

* Adjustment of INF and SUP operations:
  - Elongated constants INFI and SUPR to INFIMUM and SUPREMUM.
  - Consolidated theorem names containing INFI and SUPR: have INF and SUP instead uniformly.
  - More aggressive normalization of expressions involving INF and Inf or SUP and Sup.
  - INF_image and SUP_image do not unfold composition.
  - Dropped facts INF_comp, SUP_comp.
  - Default congruence rules strong_INF_cong and strong_SUP_cong, with simplifier implication in premises. Generalize and replace former INT_cong, SUP_cong.
  INCOMPATIBILITY.

* SUP and INF generalized to conditionally_complete_lattice.

* Swapped orientation of facts image_comp and vimage_comp:
    image_compose ~> image_comp [symmetric]
    image_comp ~> image_comp [symmetric]
    vimage_compose ~> vimage_comp [symmetric]
    vimage_comp ~> vimage_comp [symmetric]
  INCOMPATIBILITY.

* Theory reorganization: split of Big_Operators.thy into Groups_Big.thy and Lattices_Big.thy.
* Consolidated some facts about big group operators:

    setsum_0' ~> setsum.neutral
    setsum_0 ~> setsum.neutral_const
    setsum_addf ~> setsum.distrib
    setsum_cartesian_product ~> setsum.cartesian_product
    setsum_cases ~> setsum.If_cases
    setsum_commute ~> setsum.commute
    setsum_cong ~> setsum.cong
    setsum_delta ~> setsum.delta
    setsum_delta' ~> setsum.delta'
    setsum_diff1' ~> setsum.remove
    setsum_empty ~> setsum.empty
    setsum_infinite ~> setsum.infinite
    setsum_insert ~> setsum.insert
    setsum_inter_restrict'' ~> setsum.inter_filter
    setsum_mono_zero_cong_left ~> setsum.mono_neutral_cong_left
    setsum_mono_zero_cong_right ~> setsum.mono_neutral_cong_right
    setsum_mono_zero_left ~> setsum.mono_neutral_left
    setsum_mono_zero_right ~> setsum.mono_neutral_right
    setsum_reindex ~> setsum.reindex
    setsum_reindex_cong ~> setsum.reindex_cong
    setsum_reindex_nonzero ~> setsum.reindex_nontrivial
    setsum_restrict_set ~> setsum.inter_restrict
    setsum_Plus ~> setsum.Plus
    setsum_setsum_restrict ~> setsum.commute_restrict
    setsum_Sigma ~> setsum.Sigma
    setsum_subset_diff ~> setsum.subset_diff
    setsum_Un_disjoint ~> setsum.union_disjoint
    setsum_UN_disjoint ~> setsum.UNION_disjoint
    setsum_Un_Int ~> setsum.union_inter
    setsum_Union_disjoint ~> setsum.Union_disjoint
    setsum_UNION_zero ~> setsum.Union_comp
    setsum_Un_zero ~> setsum.union_inter_neutral
    strong_setprod_cong ~> setprod.strong_cong
    strong_setsum_cong ~> setsum.strong_cong
    setprod_1' ~> setprod.neutral
    setprod_1 ~> setprod.neutral_const
    setprod_cartesian_product ~> setprod.cartesian_product
    setprod_cong ~> setprod.cong
    setprod_delta ~> setprod.delta
    setprod_delta' ~> setprod.delta'
    setprod_empty ~> setprod.empty
    setprod_infinite ~> setprod.infinite
    setprod_insert ~> setprod.insert
    setprod_mono_one_cong_left ~> setprod.mono_neutral_cong_left
    setprod_mono_one_cong_right ~> setprod.mono_neutral_cong_right
    setprod_mono_one_left ~> setprod.mono_neutral_left
    setprod_mono_one_right ~> setprod.mono_neutral_right
    setprod_reindex ~> setprod.reindex
    setprod_reindex_cong ~> setprod.reindex_cong
    setprod_reindex_nonzero ~> setprod.reindex_nontrivial
    setprod_Sigma ~> setprod.Sigma
    setprod_subset_diff ~> setprod.subset_diff
    setprod_timesf ~> setprod.distrib
    setprod_Un2 ~> setprod.union_diff2
    setprod_Un_disjoint ~> setprod.union_disjoint
    setprod_UN_disjoint ~> setprod.UNION_disjoint
    setprod_Un_Int ~> setprod.union_inter
    setprod_Union_disjoint ~> setprod.Union_disjoint
    setprod_Un_one ~> setprod.union_inter_neutral

  Dropped setsum_cong2 (simple variant of setsum.cong).
  Dropped setsum_inter_restrict' (simple variant of setsum.inter_restrict).
  Dropped setsum_reindex_id, setprod_reindex_id (simple variants of setsum.reindex [symmetric], setprod.reindex [symmetric]).

  INCOMPATIBILITY.

* Abolished slightly odd global lattice interpretation for min/max.
  Fact consolidations:
    min_max.inf_assoc ~> min.assoc
    min_max.inf_commute ~> min.commute
    min_max.inf_left_commute ~> min.left_commute
    min_max.inf_idem ~> min.idem
    min_max.inf_left_idem ~> min.left_idem
    min_max.inf_right_idem ~> min.right_idem
    min_max.sup_assoc ~> max.assoc
    min_max.sup_commute ~> max.commute
    min_max.sup_left_commute ~> max.left_commute
    min_max.sup_idem ~> max.idem
    min_max.sup_left_idem ~> max.left_idem
    min_max.sup_inf_distrib1 ~> max_min_distrib2
    min_max.sup_inf_distrib2 ~> max_min_distrib1
    min_max.inf_sup_distrib1 ~> min_max_distrib2
    min_max.inf_sup_distrib2 ~> min_max_distrib1
    min_max.distrib ~> min_max_distribs
    min_max.inf_absorb1 ~> min.absorb1
    min_max.inf_absorb2 ~> min.absorb2
    min_max.sup_absorb1 ~> max.absorb1
    min_max.sup_absorb2 ~> max.absorb2
    min_max.le_iff_inf ~> min.absorb_iff1
    min_max.le_iff_sup ~> max.absorb_iff2
    min_max.inf_le1 ~> min.cobounded1
    min_max.inf_le2 ~> min.cobounded2
    le_maxI1, min_max.sup_ge1 ~> max.cobounded1
    le_maxI2, min_max.sup_ge2 ~> max.cobounded2
    min_max.le_infI1 ~> min.coboundedI1
    min_max.le_infI2 ~> min.coboundedI2
    min_max.le_supI1 ~> max.coboundedI1
    min_max.le_supI2 ~> max.coboundedI2
    min_max.less_infI1 ~> min.strict_coboundedI1
    min_max.less_infI2 ~> min.strict_coboundedI2
    min_max.less_supI1 ~> max.strict_coboundedI1
    min_max.less_supI2 ~> max.strict_coboundedI2
    min_max.inf_mono ~> min.mono
    min_max.sup_mono ~> max.mono
    min_max.le_infI, min_max.inf_greatest ~> min.boundedI
    min_max.le_supI, min_max.sup_least ~> max.boundedI
    min_max.le_inf_iff ~> min.bounded_iff
    min_max.le_sup_iff ~> max.bounded_iff

  For min_max.inf_sup_aci, prefer (one of) min.commute, min.assoc, min.left_commute, min.left_idem, max.commute, max.assoc, max.left_commute, max.left_idem directly. For min_max.inf_sup_ord, prefer (one of) min.cobounded1, min.cobounded2, max.cobounded1, max.cobounded2 directly. For min_ac or max_ac, prefer more general collection ac_simps.

  INCOMPATIBILITY.

* Theorem disambiguation Inf_le_Sup (on finite sets) ~> Inf_fin_le_Sup_fin. INCOMPATIBILITY.

* Qualified constant names Wellfounded.acc, Wellfounded.accp. INCOMPATIBILITY.

* Fact generalization and consolidation:
    neq_one_mod_two, mod_2_not_eq_zero_eq_one_int ~> not_mod_2_eq_0_eq_1
  INCOMPATIBILITY.

* Purely algebraic definition of even. Fact generalization and consolidation:
    nat_even_iff_2_dvd, int_even_iff_2_dvd ~> even_iff_2_dvd
    even_zero_(nat|int) ~> even_zero
  INCOMPATIBILITY.

* Abolished neg_numeral.
  - Canonical representation for minus one is "- 1".
  - Canonical representation for other negative numbers is "- (numeral _)".
  - When devising rule sets for number calculation, consider the following canonical cases: 0, 1, numeral _, - 1, - numeral _.
  - HOLogic.dest_number also recognizes numerals in non-canonical forms like "numeral One", "- numeral One", "- 0" and even "- ... - _".
  - Syntax for negative numerals is mere input syntax.
  INCOMPATIBILITY.

* Reduced name variants for rules on associativity and commutativity:
    add_assoc ~> add.assoc
    add_commute ~> add.commute
    add_left_commute ~> add.left_commute
    mult_assoc ~> mult.assoc
    mult_commute ~> mult.commute
    mult_left_commute ~> mult.left_commute
    nat_add_assoc ~> add.assoc
    nat_add_commute ~> add.commute
    nat_add_left_commute ~> add.left_commute
    nat_mult_assoc ~> mult.assoc
    nat_mult_commute ~> mult.commute
    eq_assoc ~> iff_assoc
    eq_left_commute ~> iff_left_commute
  INCOMPATIBILITY.

* Fact collections add_ac and mult_ac are considered old-fashioned. Prefer ac_simps instead, or specify rules (add|mult).(assoc|commute|left_commute) individually.
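For example, a minimal sketch using the general collection (the statement is arbitrary):

  lemma "a + b + c = c + b + (a::nat)"
    by (simp add: ac_simps)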
* Elimination of fact duplicates:
    equals_zero_I ~> minus_unique
    diff_eq_0_iff_eq ~> right_minus_eq
    nat_infinite ~> infinite_UNIV_nat
    int_infinite ~> infinite_UNIV_int
  INCOMPATIBILITY.

* Fact name consolidation:
    diff_def, diff_minus, ab_diff_minus ~> diff_conv_add_uminus
    minus_le_self_iff ~> neg_less_eq_nonneg
    le_minus_self_iff ~> less_eq_neg_nonpos
    neg_less_nonneg ~> neg_less_pos
    less_minus_self_iff ~> less_neg_neg [simp]
  INCOMPATIBILITY.

* More simplification rules on unary and binary minus: add_diff_cancel, add_diff_cancel_left, add_le_same_cancel1, add_le_same_cancel2, add_less_same_cancel1, add_less_same_cancel2, add_minus_cancel, diff_add_cancel, le_add_same_cancel1, le_add_same_cancel2, less_add_same_cancel1, less_add_same_cancel2, minus_add_cancel, uminus_add_conv_diff. These correspondingly have been taken away from fact collections algebra_simps and field_simps. INCOMPATIBILITY.

  To restore proofs, the following patterns are helpful:

  a) Arbitrary failing proof not involving "diff_def": Consider simplification with algebra_simps or field_simps.

  b) Lifting rules from addition to subtraction: Try with "using <rule> [of ... "- _" ...] by simp".

  c) Simplification with "diff_def": just drop "diff_def". Consider simplification with algebra_simps or field_simps; or the brute way with "simp add: diff_conv_add_uminus del: add_uminus_conv_diff".

* Introduce bdd_above and bdd_below in theory Conditionally_Complete_Lattices, use them instead of explicitly stating boundedness of sets.

* ccpo.admissible quantifies only over non-empty chains to allow more syntax-directed proof rules; the case of the empty chain shows up as additional case in fixpoint induction proofs. INCOMPATIBILITY.

* Removed and renamed theorems in Series:
    summable_le ~> suminf_le
    suminf_le ~> suminf_le_const
    series_pos_le ~> setsum_le_suminf
    series_pos_less ~> setsum_less_suminf
    suminf_ge_zero ~> suminf_nonneg
    suminf_gt_zero ~> suminf_pos
    suminf_gt_zero_iff ~> suminf_pos_iff
    summable_sumr_LIMSEQ_suminf ~> summable_LIMSEQ
    suminf_0_le ~> suminf_nonneg [rotate]
    pos_summable ~> summableI_nonneg_bounded
    ratio_test ~> summable_ratio_test

  removed series_zero, replaced by sums_finite

  removed auxiliary lemmas: sumr_offset, sumr_offset2, sumr_offset3, sumr_offset4, sumr_group, half, le_Suc_ex_iff, lemma_realpow_diff_sumr, real_setsum_nat_ivl_bounded, summable_le2, ratio_test_lemma2, sumr_minus_one_realpow_zerom, sumr_one_lb_realpow_zero, summable_convergent_sumr_iff, sumr_diff_mult_const

  INCOMPATIBILITY.

* Replace (F)DERIV syntax by has_derivative:
  - "(f has_derivative f') (at x within s)" replaces "FDERIV f x : s : f'"
  - "(f has_field_derivative f') (at x within s)" replaces "DERIV f x : s : f'"
  - "f differentiable at x within s" replaces "_ differentiable _ in _" syntax
  - removed constant isDiff
  - "DERIV f x : f'" and "FDERIV f x : f'" syntax is only available as input syntax.
  - "DERIV f x : s : f'" and "FDERIV f x : s : f'" syntax removed.
  - Renamed FDERIV_... lemmas to has_derivative_...
  - renamed deriv (the syntax constant used for "DERIV _ _ :> _") to DERIV
  - removed DERIV_intros, has_derivative_eq_intros
  - introduced derivative_intros and derivative_eq_intros, which now include rules for DERIV, has_derivative and has_vector_derivative.
  - Other renamings:
      differentiable_def ~> real_differentiable_def
      differentiableE ~> real_differentiableE
      fderiv_def ~> has_derivative_at
      field_fderiv_def ~> field_has_derivative_at
      isDiff_der ~> differentiable_def
      deriv_fderiv ~> has_field_derivative_def
      deriv_def ~> DERIV_def
  INCOMPATIBILITY.
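For example, a minimal sketch using the new rule collection (the statement is arbitrary; the remaining equation is discharged by simp):

  lemma "((\<lambda>x. x * x) has_field_derivative 2 * c) (at (c::real))"
    by (auto intro!: derivative_eq_intros simp: algebra_simps)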
* Include more theorems in continuous_intros. Remove the continuous_on_intros, isCont_intros collections; these facts are now in continuous_intros.

* Theorems about complex numbers are now stated only using Re and Im, the Complex constructor is not used anymore. It is possible to use primcorec to define the behaviour of a complex-valued function.

  Removed theorems about the Complex constructor from the simpset; they are available as the lemma collection legacy_Complex_simps. This especially removes
    i_complex_of_real: "ii * complex_of_real r = Complex 0 r".
  Instead the reverse direction is supported with
    Complex_eq: "Complex a b = a + \<i> * b"

  Moved csqrt from Fundamental_Theorem_Algebra to Complex.

  Renamings:
    Re/Im ~> complex.sel
    complex_Re/Im_zero ~> zero_complex.sel
    complex_Re/Im_add ~> plus_complex.sel
    complex_Re/Im_minus ~> uminus_complex.sel
    complex_Re/Im_diff ~> minus_complex.sel
    complex_Re/Im_one ~> one_complex.sel
    complex_Re/Im_mult ~> times_complex.sel
    complex_Re/Im_inverse ~> inverse_complex.sel
    complex_Re/Im_scaleR ~> scaleR_complex.sel
    complex_Re/Im_i ~> ii.sel
    complex_Re/Im_cnj ~> cnj.sel
    Re/Im_cis ~> cis.sel
    complex_divide_def ~> divide_complex_def
    complex_norm_def ~> norm_complex_def
    cmod_def ~> norm_complex_def

  Removed theorems:
    complex_zero_def
    complex_add_def
    complex_minus_def
    complex_diff_def
    complex_one_def
    complex_mult_def
    complex_inverse_def
    complex_scaleR_def

  INCOMPATIBILITY.

* Theory Lubs moved from the HOL image to HOL-Library. It is replaced by Conditionally_Complete_Lattices. INCOMPATIBILITY.

* HOL-Library: new theory src/HOL/Library/Tree.thy.

* HOL-Library: removed theory src/HOL/Library/Kleene_Algebra.thy; it is subsumed by session Kleene_Algebra in AFP.

* HOL-Library / theory RBT: various constants and facts are hidden; lifting setup is unregistered. INCOMPATIBILITY.

* HOL-Cardinals: new theory src/HOL/Cardinals/Ordinal_Arithmetic.thy.

* HOL-Word: bit representations prefer type bool over type bit. INCOMPATIBILITY.

* HOL-Word:
  - Abandoned fact collection "word_arith_alts", which is a duplicate of "word_arith_wis".
  - Dropped first (duplicated) element in fact collections "sint_word_ariths", "word_arith_alts", "uint_word_ariths", "uint_word_arith_bintrs".

* HOL-Number_Theory:
  - consolidated the proofs of the binomial theorem
  - the function fib is again of type nat => nat and not overloaded
  - no more references to Old_Number_Theory in the HOL libraries (except the AFP)
  INCOMPATIBILITY.

* HOL-Multivariate_Analysis:
  - Type class ordered_real_vector for ordered vector spaces.
  - New theory Complex_Basic_Analysis defining complex derivatives, holomorphic functions, etc., ported from HOL Light's canal.ml.
  - Changed order of ordered_euclidean_space to be compatible with pointwise ordering on products. Therefore instance of conditionally_complete_lattice and ordered_real_vector. INCOMPATIBILITY: use box instead of greaterThanLessThan or explicit set-comprehensions with eucl_less for other (half-)open intervals.
  - removed dependencies on type class ordered_euclidean_space with introduction of "cbox" on euclidean_space
  - renamed theorems:
      interval ~> box
      mem_interval ~> mem_box
      interval_eq_empty ~> box_eq_empty
      interval_ne_empty ~> box_ne_empty
      interval_sing(1) ~> cbox_sing
      interval_sing(2) ~> box_sing
      subset_interval_imp ~> subset_box_imp
      subset_interval ~> subset_box
      open_interval ~> open_box
      closed_interval ~> closed_cbox
      interior_closed_interval ~> interior_cbox
      bounded_closed_interval ~> bounded_cbox
      compact_interval ~> compact_cbox
      bounded_subset_closed_interval_symmetric ~> bounded_subset_cbox_symmetric
      bounded_subset_closed_interval ~> bounded_subset_cbox
      mem_interval_componentwiseI ~> mem_box_componentwiseI
      convex_box ~> convex_prod
      rel_interior_real_interval ~> rel_interior_real_box
      convex_interval ~> convex_box
      convex_hull_eq_real_interval ~> convex_hull_eq_real_cbox
      frechet_derivative_within_closed_interval ~> frechet_derivative_within_cbox
      content_closed_interval' ~> content_cbox'
      elementary_subset_interval ~> elementary_subset_box
      diameter_closed_interval ~> diameter_cbox
      frontier_closed_interval ~> frontier_cbox
      frontier_open_interval ~> frontier_box
      bounded_subset_open_interval_symmetric ~> bounded_subset_box_symmetric
      closure_open_interval ~> closure_box
      open_closed_interval_convex ~> open_cbox_convex
      open_interval_midpoint ~> box_midpoint
      content_image_affinity_interval ~> content_image_affinity_cbox
      is_interval_interval ~> is_interval_cbox + is_interval_box + is_interval_closed_interval
      bounded_interval ~> bounded_closed_interval + bounded_boxes
  - respective theorems for intervals over the reals:
      content_closed_interval + content_cbox
      has_integral + has_integral_real
      fine_division_exists + fine_division_exists_real
      has_integral_null + has_integral_null_real
      tagged_division_union_interval + tagged_division_union_interval_real
      has_integral_const + has_integral_const_real
      integral_const + integral_const_real
      has_integral_bound + has_integral_bound_real
      integrable_continuous + integrable_continuous_real
      integrable_subinterval + integrable_subinterval_real
      has_integral_reflect_lemma + has_integral_reflect_lemma_real
      integrable_reflect + integrable_reflect_real
      integral_reflect + integral_reflect_real
      image_affinity_interval + image_affinity_cbox
      image_smult_interval + image_smult_cbox
      integrable_const + integrable_const_ivl
      integrable_on_subinterval + integrable_on_subcbox
  - renamed theorems:
      derivative_linear ~> has_derivative_bounded_linear
      derivative_is_linear ~> has_derivative_linear
      bounded_linear_imp_linear ~> bounded_linear.linear

* HOL-Probability:
  - Renamed positive_integral to nn_integral:
    . Renamed all lemmas "*positive_integral*" to "*nn_integral*"
      positive_integral_positive ~> nn_integral_nonneg
    . Renamed abbreviation integral\<^sup>P to integral\<^sup>N.
  - replaced the Lebesgue integral on real numbers by the more general Bochner integral for functions into a real-normed vector space.
      integral_zero ~> integral_zero / integrable_zero
      integral_minus ~> integral_minus / integrable_minus
      integral_add ~> integral_add / integrable_add
      integral_diff ~> integral_diff / integrable_diff
      integral_setsum ~> integral_setsum / integrable_setsum
      integral_multc ~> integral_mult_left / integrable_mult_left
      integral_cmult ~> integral_mult_right / integrable_mult_right
      integral_triangle_inequality ~> integral_norm_bound
      integrable_nonneg ~> integrableI_nonneg
      integral_positive ~> integral_nonneg_AE
      integrable_abs_iff ~> integrable_abs_cancel
      positive_integral_lim_INF ~> nn_integral_liminf
      lebesgue_real_affine ~> lborel_real_affine
      borel_integral_has_integral ~> has_integral_lebesgue_integral
      integral_indicator ~> integral_real_indicator / integrable_real_indicator
      positive_integral_fst ~> nn_integral_fst'
      positive_integral_fst_measurable ~> nn_integral_fst
      positive_integral_snd_measurable ~> nn_integral_snd
      integrable_fst_measurable ~> integral_fst / integrable_fst / AE_integrable_fst
      integrable_snd_measurable ~> integral_snd / integrable_snd / AE_integrable_snd
      integral_monotone_convergence ~> integral_monotone_convergence / integrable_monotone_convergence
      integral_monotone_convergence_at_top ~> integral_monotone_convergence_at_top / integrable_monotone_convergence_at_top
      has_integral_iff_positive_integral_lebesgue ~> has_integral_iff_has_bochner_integral_lebesgue_nonneg
      lebesgue_integral_has_integral ~> has_integral_integrable_lebesgue_nonneg
      positive_integral_lebesgue_has_integral ~> integral_has_integral_lebesgue_nonneg / integrable_has_integral_lebesgue_nonneg
      lebesgue_integral_real_affine ~> nn_integral_real_affine
      has_integral_iff_positive_integral_lborel ~> integral_has_integral_nonneg / integrable_has_integral_nonneg

    The following theorems were removed:
      lebesgue_integral_nonneg
      lebesgue_integral_uminus
      lebesgue_integral_cmult
      lebesgue_integral_multc
      lebesgue_integral_cmult_nonneg
      integral_cmul_indicator
      integral_real

  - Formalized properties about exponentially, Erlang, and normally distributed random variables.

* HOL-Decision_Procs: Separate command 'approximate' for approximative computation in src/HOL/Decision_Procs/Approximation. Minor INCOMPATIBILITY.

*** Scala ***

* The signature and semantics of Document.Snapshot.cumulate_markup / select_markup have been clarified. Markup is now traversed in the order of reports given by the prover: later markup is usually more specific and may override results accumulated so far. The elements guard is mandatory and checked precisely. Subtle INCOMPATIBILITY.

* Substantial reworking of internal PIDE protocol communication channels. INCOMPATIBILITY.

*** ML ***

* Subtle change of semantics of Thm.eq_thm: theory stamps are not compared (according to Thm.thm_ord), but assumed to be covered by the current background theory. Thus equivalent data produced in different branches of the theory graph usually coincides (e.g. relevant for theory merge). Note that the softer Thm.eq_thm_prop is often more appropriate than Thm.eq_thm.

* Proper context for basic Simplifier operations: rewrite_rule, rewrite_goals_rule, rewrite_goals_tac etc. INCOMPATIBILITY, need to pass runtime Proof.context (and ensure that the simplified entity actually belongs to it).

* Proper context discipline for read_instantiate and instantiate_tac: variables that are meant to become schematic need to be given as fixed, and are generalized by the explicit context of local variables. This corresponds to Isar attributes "where" and "of" with 'for' declaration.
* Moved ML_Compiler.exn_trace and other operations on exceptions to
structure Runtime.  Minor INCOMPATIBILITY.

* Discontinued old Toplevel.debug in favour of system option
"ML_exception_trace", which may be also declared within the context
via "declare [[ML_exception_trace = true]]".  Minor INCOMPATIBILITY.

* Renamed configuration option "ML_trace" to "ML_source_trace".  Minor
INCOMPATIBILITY.

* Configuration option "ML_print_depth" controls the pretty-printing
depth of the ML compiler within the context.  The old print_depth in
ML is still available as default_print_depth, but rarely used.  Minor
INCOMPATIBILITY.

* Toplevel function "use" refers to raw ML bootstrap environment,
without Isar context nor antiquotations.  Potential INCOMPATIBILITY.
Note that 'ML_file' is the canonical command to load ML files into the
formal context.

* Simplified programming interface to define ML antiquotations, see
structure ML_Antiquotation.  Minor INCOMPATIBILITY.

* ML antiquotation @{here} refers to its source position, which is
occasionally useful for experimentation and diagnostic purposes.

* ML antiquotation @{path} produces a Path.T value, similarly to
Path.explode, but with compile-time check against the file-system and
some PIDE markup.  Note that unlike theory source, ML does not have a
well-defined master directory, so an absolute symbolic path
specification is usually required, e.g. "~~/src/HOL".

* ML antiquotation @{print} inlines a function to print an arbitrary
ML value, which is occasionally useful for diagnostic or demonstration
purposes.


*** System ***

* Proof General with its traditional helper scripts is now an optional
Isabelle component, e.g. see ProofGeneral-4.2-2 from the Isabelle
component repository http://isabelle.in.tum.de/components/.  Note that
the "system" manual provides general explanations about add-on
components, especially those that are not bundled with the release.

* The raw Isabelle process executable has been renamed from
"isabelle-process" to "isabelle_process", which conforms to common
shell naming conventions, and allows to define a shell function within
the Isabelle environment to avoid dynamic path lookup.  Rare
incompatibility for old tools that do not use the ISABELLE_PROCESS
settings variable.

* Former "isabelle tty" has been superseded by "isabelle console",
with implicit build like "isabelle jedit", and without the mostly
obsolete Isar TTY loop.

* Simplified "isabelle display" tool.  Settings variables DVI_VIEWER
and PDF_VIEWER now refer to the actual programs, not shell
command-lines.  Discontinued option -c: invocation may be asynchronous
via desktop environment, without any special precautions.  Potential
INCOMPATIBILITY with ambitious private settings.

* Removed obsolete "isabelle unsymbolize".  Note that the usual format
for email communication is the Unicode rendering of Isabelle symbols,
as produced by Isabelle/jEdit, for example.

* Removed obsolete tool "wwwfind".  Similar functionality may be
integrated into Isabelle/jEdit eventually.

* Improved 'display_drafts' concerning desktop integration and
repeated invocation in PIDE front-end: re-use single file
$ISABELLE_HOME_USER/tmp/drafts.pdf and corresponding views.

* Session ROOT specifications require explicit 'document_files' for
robust dependencies on LaTeX sources.  Only these explicitly given
files are copied to the document output directory, before document
processing is started.
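For example, a minimal sketch of such a ROOT entry (session, theory
and file names are hypothetical):

  session "My_Session" = HOL +
    theories
      My_Theory
    document_files
      "root.tex"
      "root.bib"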
* Windows: support for regular TeX installation (e.g. MiKTeX) instead
of TeX Live from Cygwin.



New in Isabelle2013-2 (December 2013)
-------------------------------------

*** Prover IDE -- Isabelle/Scala/jEdit ***

* More robust editing of running commands with internal forks,
e.g. non-terminating 'by' steps.

* More relaxed Sledgehammer panel: avoid repeated application of query
after edits surrounding the command location.

* More status information about commands that are interrupted
accidentally (via physical event or Poly/ML runtime system signal,
e.g. out-of-memory).


*** System ***

* More robust termination of external processes managed by
Isabelle/ML: support cancellation of tasks within the range of
milliseconds, as required for PIDE document editing with automatically
tried tools (e.g. Sledgehammer).

* Reactivated Isabelle/Scala kill command for external processes on
Mac OS X, which was accidentally broken in Isabelle2013-1 due to a
workaround for some Debian/Ubuntu Linux versions from 2013.



New in Isabelle2013-1 (November 2013)
-------------------------------------

*** General ***

* Discontinued obsolete 'uses' within theory header.  Note that
commands like 'ML_file' work without separate declaration of file
dependencies.  Minor INCOMPATIBILITY.

* Discontinued redundant 'use' command, which was superseded by
'ML_file' in Isabelle2013.  Minor INCOMPATIBILITY.

* Simplified subscripts within identifiers, using plain \<^sub>
instead of the second copy \<^isub> and \<^isup>.  Superscripts are
only for literal tokens within notation; explicit mixfix annotations
for consts or fixed variables may be used as fall-back for unusual
names.  Obsolete \<twosuperior> has been expanded to \<^sup>2 in
Isabelle/HOL.  INCOMPATIBILITY, use "isabelle update_sub_sup" to
standardize symbols as a starting point for further manual cleanup.
The ML reference variable "legacy_isub_isup" may be set as temporary
workaround, to make the prover accept a subset of the old identifier
syntax.

* Document antiquotations: term style "isub" has been renamed to
"sub".  Minor INCOMPATIBILITY.

* Uniform management of "quick_and_dirty" as system option (see also
"isabelle options"), configuration option within the context (see also
Config.get in Isabelle/ML), and attribute in Isabelle/Isar.  Minor
INCOMPATIBILITY, need to use more official Isabelle means to access
quick_and_dirty, instead of historical poking into mutable reference.

* Renamed command 'print_configs' to 'print_options'.  Minor
INCOMPATIBILITY.

* Proper diagnostic command 'print_state'.  Old 'pr' (with its
implicit change of some global references) is retained for now as
control command, e.g. for ProofGeneral 3.7.x.

* Discontinued 'print_drafts' command with its old-fashioned PS output
and Unix command-line print spooling.  Minor INCOMPATIBILITY: use
'display_drafts' instead and print via the regular document viewer.

* Updated and extended "isar-ref" and "implementation" manual,
eliminated old "ref" manual.


*** Prover IDE -- Isabelle/Scala/jEdit ***

* New manual "jedit" for Isabelle/jEdit, see isabelle doc or
Documentation panel.

* Dockable window "Documentation" provides access to Isabelle
documentation.

* Dockable window "Find" provides query operations for formal entities
(GUI front-end to 'find_theorems' command).

* Dockable window "Sledgehammer" manages asynchronous / parallel
sledgehammer runs over existing document sources, independently of
normal editing and checking process.
* Dockable window "Timing" provides an overview of relevant command timing information, depending on option jedit_timing_threshold. The same timing information is shown in the extended tooltip of the command keyword, when hovering the mouse over it while the CONTROL or COMMAND modifier is pressed. * Improved dockable window "Theories": Continuous checking of proof document (visible and required parts) may be controlled explicitly, using check box or shortcut "C+e ENTER". Individual theory nodes may be marked explicitly as required and checked in full, using check box or shortcut "C+e SPACE". * Improved completion mechanism, which is now managed by the Isabelle/jEdit plugin instead of SideKick. Refined table of Isabelle symbol abbreviations (see $ISABELLE_HOME/etc/symbols). * Standard jEdit keyboard shortcut C+b complete-word is remapped to isabelle.complete for explicit completion in Isabelle sources. INCOMPATIBILITY wrt. jEdit defaults, may have to invent new shortcuts to resolve conflict. * Improved support of various "minor modes" for Isabelle NEWS, options, session ROOT etc., with completion and SideKick tree view. * Strictly monotonic document update, without premature cancellation of running transactions that are still needed: avoid reset/restart of such command executions while editing. * Support for asynchronous print functions, as overlay to existing document content. * Support for automatic tools in HOL, which try to prove or disprove toplevel theorem statements. * Action isabelle.reset-font-size resets main text area font size according to Isabelle/Scala plugin option "jedit_font_reset_size" (see also "Plugin Options / Isabelle / General"). It can be bound to some keyboard shortcut by the user (e.g. C+0 and/or C+NUMPAD0). * File specifications in jEdit (e.g. file browser) may refer to $ISABELLE_HOME and $ISABELLE_HOME_USER on all platforms. Discontinued obsolete $ISABELLE_HOME_WINDOWS variable. * Improved support for Linux look-and-feel "GTK+", see also "Utilities / Global Options / Appearance". * Improved support of native Mac OS X functionality via "MacOSX" plugin, which is now enabled by default. *** Pure *** * Commands 'interpretation' and 'sublocale' are now target-sensitive. In particular, 'interpretation' allows for non-persistent interpretation within "context ... begin ... end" blocks offering a light-weight alternative to 'sublocale'. See "isar-ref" manual for details. * Improved locales diagnostic command 'print_dependencies'. * Discontinued obsolete 'axioms' command, which has been marked as legacy since Isabelle2009-2. INCOMPATIBILITY, use 'axiomatization' instead, while observing its uniform scope for polymorphism. * Discontinued empty name bindings in 'axiomatization'. INCOMPATIBILITY. * System option "proofs" has been discontinued. Instead the global state of Proofterm.proofs is persistently compiled into logic images as required, notably HOL-Proofs. Users no longer need to change Proofterm.proofs dynamically. Minor INCOMPATIBILITY. * Syntax translation functions (print_translation etc.) always depend on Proof.context. Discontinued former "(advanced)" option -- this is now the default. Minor INCOMPATIBILITY. * Former global reference trace_unify_fail is now available as configuration option "unify_trace_failure" (global context only). * SELECT_GOAL now retains the syntactic context of the overall goal state (schematic variables etc.). Potential INCOMPATIBILITY in rare situations. 
*** HOL ***

* Stronger precedence of syntax for big intersection and union on
sets, in accordance with corresponding lattice operations.
INCOMPATIBILITY.

* Notation "{p:A. P}" now allows tuple patterns as well.

* Nested case expressions are now translated in a separate check phase
rather than during parsing.  The data for case combinators is
separated from the datatype package.  The declaration attribute
"case_translation" can be used to register new case combinators:

  declare [[case_translation case_combinator
    constructor1 ... constructorN]]

* Code generator:

  - 'code_printing' unifies 'code_const' / 'code_type' / 'code_class'
    / 'code_instance'.

  - 'code_identifier' declares name hints for arbitrary identifiers in
    generated code, subsuming 'code_modulename'.

  See the isar-ref manual for syntax diagrams, and the HOL theories
  for examples.

* Attribute 'code': 'code' now declares concrete and abstract code
equations uniformly.  Use explicit 'code equation' and 'code abstract'
to distinguish both when desired.

* Discontinued theories Code_Integer and Efficient_Nat by a more
fine-grained stack of theories Code_Target_Int, Code_Binary_Nat,
Code_Target_Nat and Code_Target_Numeral.  See the tutorial on code
generation for details.  INCOMPATIBILITY.

* Numeric types are mapped by default to target language numerals:
natural (replaces former code_numeral) and integer (replaces former
code_int).  Conversions are available as integer_of_natural /
natural_of_integer / integer_of_nat / nat_of_integer (in HOL) and
Code_Numeral.integer_of_natural / Code_Numeral.natural_of_integer (in
ML).  INCOMPATIBILITY.

* Function package: For mutually recursive functions f and g, separate
cases rules f.cases and g.cases are generated instead of unusable
f_g.cases which exposed internal sum types.  Potential
INCOMPATIBILITY, in the case that the unusable rule was used
nevertheless.

* Function package: For each function f, new rules f.elims are
generated, which eliminate equalities of the form "f x = t".

* New command 'fun_cases' derives ad-hoc elimination rules for
function equations as simplified instances of f.elims, analogous to
inductive_cases.  See ~~/src/HOL/ex/Fundefs.thy for some examples.

* Lifting:

  - parametrized correspondence relations are now supported:
    + parametricity theorems for the raw term can be specified in the
      command lift_definition, which allows us to generate stronger
      transfer rules
    + setup_lifting generates stronger transfer rules if parametric
      correspondence relation can be generated
    + various new properties of the relator must be specified to
      support parametricity
    + parametricity theorem for the Quotient relation can be specified

  - setup_lifting generates domain rules for the Transfer package

  - stronger reflexivity prover of respectfulness theorems for type
    copies

  - ===> and --> are now local.  The symbols can be introduced by
    interpreting the locale lifting_syntax (typically in an anonymous
    context), as sketched below.

  - Lifting/Transfer relevant parts of Library/Quotient_* are now in
    Main.  Potential INCOMPATIBILITY

  - new commands for restoring and deleting Lifting/Transfer context:
    lifting_forget, lifting_update

  - the command print_quotmaps was renamed to print_quot_maps.
    INCOMPATIBILITY
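A minimal sketch of the anonymous-context idiom for the relator
syntax:

  context
  begin

  interpretation lifting_syntax .

  (* the ===> and --> notation for correspondence relations is
     available here, but does not persist after "end" *)

  end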
* Transfer:

  - better support for domains in Transfer: replace Domainp T by the
    actual invariant in a transferred goal

  - transfer rules can have as assumptions other transfer rules

  - Experimental support for transferring from the raw level to the
    abstract level: Transfer.transferred attribute

  - Attribute version of the transfer method: untransferred attribute

* Reification and reflection:

  - Reification is now directly available in HOL-Main in structure
    "Reification".

  - Reflection now handles multiple lists with variables also.

  - The whole reflection stack has been decomposed into conversions.

  INCOMPATIBILITY.

* Revised devices for recursive definitions over finite sets:

  - Only one fundamental fold combinator on finite set remains:

      Finite_Set.fold :: ('a => 'b => 'b) => 'b => 'a set => 'b

    This is now identity on infinite sets.

  - Locales ("mini packages") for fundamental definitions with
    Finite_Set.fold: folding, folding_idem.

  - Locales comm_monoid_set, semilattice_order_set and
    semilattice_neutr_order_set for big operators on sets.  See theory
    Big_Operators for canonical examples.

    Note that foundational constants comm_monoid_set.F and
    semilattice_set.F correspond to former combinators fold_image and
    fold1 respectively.  These are now gone.  You may use those
    foundational constants as substitutes, but it is preferable to
    interpret the above locales accordingly.

  - Dropped class ab_semigroup_idem_mult (special case of lattice, no
    longer needed in connection with Finite_Set.fold etc.)

  - Fact renames:

      card.union_inter ~> card_Un_Int [symmetric]
      card.union_disjoint ~> card_Un_disjoint

  INCOMPATIBILITY.

* Locale hierarchy for abstract orderings and (semi)lattices.

* Complete_Partial_Order.admissible is defined outside the type class
ccpo, but with mandatory prefix ccpo.  Admissibility theorems lose the
class predicate assumption or sort constraint when possible.
INCOMPATIBILITY.

* Introduce type class "conditionally_complete_lattice": Like a
complete lattice but does not assume the existence of the top and
bottom elements.  Allows to generalize some lemmas about reals and
extended reals.  Removed SupInf and replaced it by the instantiation
of conditionally_complete_lattice for real.  Renamed lemmas about
conditionally-complete lattice from Sup_... to cSup_... and from
Inf_... to cInf_... to avoid hiding of similar complete lattice
lemmas.

* Introduce type class linear_continuum as combination of
conditionally-complete lattices and inner dense linorders which have
more than one element.  INCOMPATIBILITY.

* Introduced type classes order_top and order_bot.  The old classes
top and bot only contain the syntax without assumptions.
INCOMPATIBILITY: Rename bot -> order_bot, top -> order_top

* Introduce type classes "no_top" and "no_bot" for orderings without
top and bottom elements.

* Split dense_linorder into inner_dense_order and no_top, no_bot.

* Complex_Main: Unify and move various concepts from
HOL-Multivariate_Analysis to HOL-Complex_Main.

  - Introduce type class (lin)order_topology and
    linear_continuum_topology.  Allows to generalize theorems about
    limits and order.  Instances are reals and extended reals.

  - continuous and continuous_on from Multivariate_Analysis:
    "continuous" is the continuity of a function at a filter.
    "isCont" is now an abbreviation: "isCont f a == continuous (at a)
    f".  Generalized continuity lemmas from isCont to continuous on an
    arbitrary filter.

  - compact from Multivariate_Analysis.  Use Bolzano's lemma to prove
    compactness of closed intervals on reals.
    Continuous functions attain infimum and supremum on compact sets.
    The inverse of a continuous function is continuous, when the
    function is continuous on a compact set.

  - connected from Multivariate_Analysis.  Use it to prove the
    intermediate value theorem.  Show connectedness of intervals on
    linear_continuum_topology.

  - first_countable_topology from Multivariate_Analysis.  It is used
    to show equivalence of properties on the neighbourhood filter of x
    and on all sequences converging to x.

  - FDERIV: Definition of has_derivative moved to Deriv.thy.  Moved
    theorems from Library/FDERIV.thy to Deriv.thy and base the
    definition of DERIV on FDERIV.  Add variants of DERIV and FDERIV
    which are restricted to sets, i.e. to represent derivatives from
    left or right.

  - Removed the within-filter.  It is replaced by the principal
    filter:

      F within X = inf F (principal X)

  - Introduce "at x within U" as a single constant, "at x" is now an
    abbreviation for "at x within UNIV"

  - Introduce named theorem collections tendsto_intros,
    continuous_intros, continuous_on_intros and FDERIV_intros.
    Theorems in tendsto_intros (or FDERIV_intros) are also available
    as tendsto_eq_intros (or FDERIV_eq_intros) where the right-hand
    side is replaced by a congruence rule.  This allows to apply them
    as intro rules and then prove equivalence by the simplifier.

  - Restructured theories in HOL-Complex_Main:

    + Moved RealDef and RComplete into Real

    + Introduced Topological_Spaces and moved theorems about
      topological spaces, filters, limits and continuity to it

    + Renamed RealVector to Real_Vector_Spaces

    + Split Lim, SEQ, Series into Topological_Spaces,
      Real_Vector_Spaces, and Limits

    + Moved Ln and Log to Transcendental

    + Moved theorems about continuity from Deriv to Topological_Spaces

  - Remove various auxiliary lemmas.

  INCOMPATIBILITY.

* Nitpick:

  - Added option "spy".
  - Reduce incidence of "too high arity" errors.

* Sledgehammer:

  - Renamed option:
      isar_shrink ~> isar_compress
    INCOMPATIBILITY.
  - Added options "isar_try0", "spy".
  - Better support for "isar_proofs".
  - MaSh has been fine-tuned and now runs as a local server.

* Improved support for ad hoc overloading of constants (see also
isar-ref manual and ~~/src/HOL/ex/Adhoc_Overloading_Examples.thy).

* Library/Polynomial.thy:

  - Use lifting for primitive definitions.
  - Explicit conversions from and to lists of coefficients, used for
    generated code.
  - Replaced recursion operator poly_rec by fold_coeffs.
  - Prefer pre-existing gcd operation for gcd.
  - Fact renames:
      poly_eq_iff ~> poly_eq_poly_eq_iff
      poly_ext ~> poly_eqI
      expand_poly_eq ~> poly_eq_iff

  INCOMPATIBILITY.

* New Library/Simps_Case_Conv.thy: Provides commands simps_of_case and
case_of_simps to convert function definitions between a list of
equations with patterns on the lhs and a single equation with case
expressions on the rhs.  See also Ex/Simps_Case_Conv_Examples.thy.

* New Library/FSet.thy: type of finite sets defined as a subtype of
sets defined by Lifting/Transfer.

* Discontinued theory src/HOL/Library/Eval_Witness.  INCOMPATIBILITY.

* Consolidation of library theories on product orders:

    Product_Lattice ~> Product_Order -- pointwise order on products
    Product_ord ~> Product_Lexorder -- lexicographic order on products

  INCOMPATIBILITY.

* Imperative-HOL: The MREC combinator is considered legacy and no
longer included by default.  INCOMPATIBILITY, use partial_function
instead (see the sketch below), or import theory Legacy_Mrec as a
fallback.
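A minimal sketch of a partial_function definition in the option monad
(the Heap monad of Imperative-HOL works analogously with mode
"(heap)"; the function itself is a hypothetical example):

  partial_function (option) collatz :: "nat => nat option"
  where
    "collatz n =
      (if n <= 1 then Some n
       else if n mod 2 = 0 then collatz (n div 2)
       else collatz (3 * n + 1))"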
* HOL-Algebra: Discontinued theories ~~/src/HOL/Algebra/abstract and
~~/src/HOL/Algebra/poly.  Existing theories should be based on
~~/src/HOL/Library/Polynomial instead.  The latter provides
integration with HOL's type classes for rings.  INCOMPATIBILITY.

* HOL-BNF:

  - Various improvements to BNF-based (co)datatype package, including
    new commands "primrec_new", "primcorec", and
    "datatype_new_compat", as well as documentation.  See
    "datatypes.pdf" for details.

  - New "coinduction" method to avoid some boilerplate (compared to
    coinduct).

  - Renamed keywords:

      data ~> datatype_new
      codata ~> codatatype
      bnf_def ~> bnf

  - Renamed many generated theorems, including

      discs ~> disc
      map_comp' ~> map_comp
      map_id' ~> map_id
      sels ~> sel
      set_map' ~> set_map
      sets ~> set

  INCOMPATIBILITY.


*** ML ***

* Spec_Check is a Quickcheck tool for Isabelle/ML.  The ML function
"check_property" allows to check specifications of the form "ALL x y
z. prop x y z".  See also ~~/src/Tools/Spec_Check/ with its
Examples.thy in particular.

* Improved printing of exception trace in Poly/ML 5.5.1, with regular
tracing output in the command transaction context instead of physical
stdout.  See also Toplevel.debug, Toplevel.debugging and
ML_Compiler.exn_trace.

* ML type "theory" is now immutable, without any special treatment of
drafts or linear updates (which could lead to "stale theory" errors in
the past).  Discontinued obsolete operations like Theory.copy,
Theory.checkpoint, and the auxiliary type theory_ref.  Minor
INCOMPATIBILITY.

* More uniform naming of goal functions for skipped proofs:

    Skip_Proof.prove ~> Goal.prove_sorry
    Skip_Proof.prove_global ~> Goal.prove_sorry_global

  Minor INCOMPATIBILITY.

* Simplifier tactics and tools use proper Proof.context instead of
historic type simpset.  Old-style declarations like addsimps,
addsimprocs etc. operate directly on Proof.context.  Raw type simpset
retains its use as snapshot of the main Simplifier context, using
simpset_of and put_simpset on Proof.context.  INCOMPATIBILITY -- port
old tools by making them depend on (ctxt : Proof.context) instead of
(ss : simpset), then turn (simpset_of ctxt) into ctxt (see the porting
sketch further below).

* Modifiers for classical wrappers (e.g. addWrapper, delWrapper)
operate on Proof.context instead of claset, for uniformity with addIs,
addEs, addDs etc.  Note that claset_of and put_claset allow to manage
clasets separately from the context.

* Discontinued obsolete ML antiquotations @{claset} and @{simpset}.
INCOMPATIBILITY, use @{context} instead.

* Antiquotation @{theory_context A} is similar to @{theory A}, but
presents the result as initial Proof.context.


*** System ***

* Discontinued obsolete isabelle usedir, mkdir, make -- superseded by
"isabelle build" in Isabelle2013.  INCOMPATIBILITY.

* Discontinued obsolete isabelle-process options -f and -u (former
administrative aliases of option -e).  Minor INCOMPATIBILITY.

* Discontinued obsolete isabelle print tool, and PRINT_COMMAND
settings variable.

* Discontinued ISABELLE_DOC_FORMAT settings variable and historic
document formats: dvi.gz, ps, ps.gz -- the default document format is
always pdf.

* Isabelle settings variable ISABELLE_BUILD_JAVA_OPTIONS allows to
specify global resources of the JVM process run by isabelle build.

* Toplevel executable $ISABELLE_HOME/bin/isabelle_scala_script allows
to run Isabelle/Scala source files as standalone programs.

* Improved "isabelle keywords" tool (for old-style ProofGeneral
keyword tables): use Isabelle/Scala operations, which inspect outer
syntax without requiring to build sessions first.
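Returning to the Simplifier change in the ML section above, a minimal
porting sketch (the tactic name is a hypothetical example):

  ML {*
    (* before: fun my_simp_tac (ss: simpset) = asm_full_simp_tac ss *)
    fun my_simp_tac (ctxt: Proof.context) = asm_full_simp_tac ctxt
  *}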
* Sessions may be organized via 'chapter' specifications in the ROOT
file, which determines a two-level hierarchy of browser info.  The old
tree-like organization via implicit sub-session relation (with its
tendency towards erratic fluctuation of URLs) has been discontinued.
The default chapter is called "Unsorted".  Potential INCOMPATIBILITY
for HTML presentation of theories.



New in Isabelle2013 (February 2013)
-----------------------------------

*** General ***

* Theorem status about oracles and unfinished/failed future proofs is
no longer printed by default, since it is incompatible with
incremental / parallel checking of the persistent document model.  ML
function Thm.peek_status may be used to inspect a snapshot of the
ongoing evaluation process.  Note that in batch mode --- notably
isabelle build --- the system ensures that future proofs of all
accessible theorems in the theory context are finished (as before).

* Configuration option show_markup controls direct inlining of markup
into the printed representation of formal entities --- notably type
and sort constraints.  This enables Prover IDE users to retrieve that
information via tooltips in the output window, for example.

* Command 'ML_file' evaluates ML text from a file directly within the
theory, without any predeclaration via 'uses' in the theory header.

* Old command 'use' and corresponding keyword 'uses' in the theory
header are legacy features and will be discontinued soon.  Tools that
load their additional source files may imitate the 'ML_file'
implementation, such that the system can take care of dependencies
properly.

* Discontinued obsolete method fastsimp / tactic fast_simp_tac, which
is called fastforce / fast_force_tac already since Isabelle2011-1.

* Updated and extended "isar-ref" and "implementation" manual, reduced
remaining material in old "ref" manual.

* Improved support for auxiliary contexts that indicate block
structure for specifications.  Nesting of "context fixes ... context
assumes ..." and "class ... context ...".

* Attribute "consumes" allows a negative value as well, which is
interpreted relatively to the total number of premises of the rule in
the target context.  This form of declaration is stable when exported
from a nested 'context' with additional assumptions.  It is the
preferred form for definitional packages, notably cases/rules produced
in HOL/inductive and HOL/function.

* More informative error messages for Isar proof commands involving
lazy enumerations (method applications etc.).

* Refined 'help' command to retrieve outer syntax commands according
to name patterns (with clickable results).


*** Prover IDE -- Isabelle/Scala/jEdit ***

* Parallel terminal proofs ('by') are enabled by default, likewise
proofs that are built into packages like 'datatype', 'function'.  This
allows to "run ahead" checking the theory specifications on the
surface, while the prover is still crunching on internal
justifications.  Unfinished / cancelled proofs are restarted as
required to complete full proof checking eventually.

* Improved output panel with tooltips, hyperlinks etc. based on the
same Rich_Text_Area as regular Isabelle/jEdit buffers.  Activation of
tooltips leads to some window that supports the same recursively,
which can lead to stacks of tooltips as the semantic document content
is explored.  ESCAPE closes the whole stack, individual windows may be
closed separately, or detached to become independent jEdit dockables.
* Improved support for commands that produce graph output: the text
message contains a clickable area to open a new instance of the graph
browser on demand.

* More robust incremental parsing of outer syntax (partial comments,
malformed symbols).  Changing the balance of open/close quotes and
comment delimiters works more conveniently with unfinished situations
that frequently occur in user interaction.

* More efficient painting and improved reactivity when editing large
files.  More scalable management of formal document content.

* Smarter handling of tracing messages: prover process pauses after
certain number of messages per command transaction, with some user
dialog to stop or continue.  This avoids swamping the front-end with
potentially infinite message streams.

* More plugin options and preferences, based on Isabelle/Scala.  The
jEdit plugin option panel provides access to some Isabelle/Scala
options, including tuning parameters for editor reactivity and color
schemes.

* Dockable window "Symbols" provides some editing support for Isabelle
symbols.

* Dockable window "Monitor" shows ML runtime statistics.  Note that
continuous display of the chart slows down the system.

* Improved editing support for control styles: subscript, superscript,
bold, reset of style -- operating on single symbols or text
selections.  Cf. keyboard shortcuts C+e DOWN/UP/RIGHT/LEFT.

* Actions isabelle.increase-font-size and isabelle.decrease-font-size
adjust the main text area font size, and its derivatives for output,
tooltips etc.  Cf. keyboard shortcuts C-PLUS and C-MINUS, which often
need to be adapted to local keyboard layouts.

* More reactive completion popup by default: use \t (TAB) instead of
\n (NEWLINE) to minimize intrusion into regular flow of editing.  See
also "Plugin Options / SideKick / General / Code Completion Options".

* Implicit check and build dialog of the specified logic session
image.  For example, HOL, HOLCF, HOL-Nominal can be produced on
demand, without bundling big platform-dependent heap images in the
Isabelle distribution.

* Uniform Java 7 platform on Linux, Mac OS X, Windows: recent updates
from Oracle provide better multi-platform experience.  This version is
now bundled exclusively with Isabelle.


*** Pure ***

* Code generation for Haskell: restrict unqualified imports from
Haskell Prelude to a small set of fundamental operations.

* Command 'export_code': relative file names are interpreted
relatively to master directory of current theory rather than the
rather arbitrary current working directory.  INCOMPATIBILITY.

* Discontinued obsolete attribute "COMP".  Potential INCOMPATIBILITY,
use regular rule composition via "OF" / "THEN", or explicit proof
structure instead.  Note that Isabelle/ML provides a variety of
operators like COMP, INCR_COMP, COMP_INCR, which need to be applied
with some care where this is really required.

* Command 'typ' supports an additional variant with explicit sort
constraint, to infer and check the most general type conforming to a
given sort.  Example (in HOL):

  typ "_ * _ * bool * unit" :: finite

* Command 'locale_deps' visualizes all locales and their relations as
a Hasse diagram.


*** HOL ***

* Sledgehammer:

  - Added MaSh relevance filter based on machine-learning; see the
    Sledgehammer manual for details.
  - Polished Isar proofs generated with "isar_proofs" option.
  - Rationalized type encodings ("type_enc" option).
  - Renamed "kill_provers" subcommand to "kill_all".
  - Renamed options:

      isar_proof ~> isar_proofs
      isar_shrink_factor ~> isar_shrink
      max_relevant ~> max_facts
      relevance_thresholds ~> fact_thresholds

* Quickcheck: added an optimisation for equality premises.  It is
switched on by default, and can be switched off by setting the
configuration quickcheck_optimise_equality to false.

* Quotient: only one quotient can be defined by quotient_type.
INCOMPATIBILITY.

* Lifting:

  - generation of an abstraction function equation in lift_definition
  - quot_del attribute
  - renamed no_abs_code -> no_code (INCOMPATIBILITY)

* Simproc "finite_Collect" rewrites set comprehensions into pointfree
expressions.

* Preprocessing of the code generator rewrites set comprehensions into
pointfree expressions.

* The SMT solver Z3 has now by default a restricted set of directly
supported features.  For the full set of features (div/mod, nonlinear
arithmetic, datatypes/records) with potential proof reconstruction
failures, enable the configuration option "z3_with_extensions".  Minor
INCOMPATIBILITY.

* Simplified 'typedef' specifications: historical options for implicit
set definition and alternative name have been discontinued.  The
former behavior of "typedef (open) t = A" is now the default, but
written just "typedef t = A".  INCOMPATIBILITY, need to adapt theories
accordingly.

* Removed constant "chars"; prefer "Enum.enum" on type "char"
directly.  INCOMPATIBILITY.

* Moved operation product, sublists and n_lists from theory Enum to
List.  INCOMPATIBILITY.

* Theorem UN_o generalized to SUP_comp.  INCOMPATIBILITY.

* Class "comm_monoid_diff" formalises properties of bounded
subtraction, with natural numbers and multisets as typical instances.

* Added combinator "Option.these" with type "'a option set => 'a set".

* Theory "Transitive_Closure": renamed lemmas

    reflcl_tranclp -> reflclp_tranclp
    rtranclp_reflcl -> rtranclp_reflclp

  INCOMPATIBILITY.

* Theory "Rings": renamed lemmas (in class semiring)

    left_distrib ~> distrib_right
    right_distrib ~> distrib_left

  INCOMPATIBILITY.

* Generalized the definition of limits:

  - Introduced the predicate filterlim (LIM x F. f x :> G) which
    expresses that when the input values x converge to F then the
    output f x converges to G.

  - Added filters for convergence to positive (at_top) and negative
    infinity (at_bot).

  - Moved infinity in the norm (at_infinity) from
    Multivariate_Analysis to Complex_Main.

  - Removed real_tendsto_inf, it is superseded by "LIM x F. f x :>
    at_top".

  INCOMPATIBILITY.

* Theory "Library/Option_ord" provides instantiation of option type to
lattice type classes.

* Theory "Library/Multiset": renamed

    constant fold_mset ~> Multiset.fold
    fact fold_mset_commute ~> fold_mset_comm

  INCOMPATIBILITY.

* Renamed theory Library/List_Prefix to Library/Sublist, with related
changes as follows.

  - Renamed constants (and related lemmas)

      prefix ~> prefixeq
      strict_prefix ~> prefix

  - Replaced constant "postfix" by "suffixeq" with swapped argument
    order (i.e., "postfix xs ys" is now "suffixeq ys xs") and dropped
    old infix syntax "xs >>= ys"; use "suffixeq ys xs" instead.
    Renamed lemmas accordingly.

  - Added constant "list_hembeq" for homeomorphic embedding on lists.
    Added abbreviation "sublisteq" for special case "list_hembeq (op
    =)".

  - Theory Library/Sublist no longer provides "order" and "bot" type
    class instances for the prefix order (merely corresponding locale
    interpretations).  The type class instances are now in theory
    Library/Prefix_Order.

  - The sublist relation of theory Library/Sublist_Order is now based
    on "Sublist.sublisteq".
    Renamed lemmas accordingly:

      le_list_append_le_same_iff ~> Sublist.sublisteq_append_le_same_iff
      le_list_append_mono ~> Sublist.list_hembeq_append_mono
      le_list_below_empty ~> Sublist.list_hembeq_Nil,
        Sublist.list_hembeq_Nil2
      le_list_Cons_EX ~> Sublist.list_hembeq_ConsD
      le_list_drop_Cons2 ~> Sublist.sublisteq_Cons2'
      le_list_drop_Cons_neq ~> Sublist.sublisteq_Cons2_neq
      le_list_drop_Cons ~> Sublist.sublisteq_Cons'
      le_list_drop_many ~> Sublist.sublisteq_drop_many
      le_list_filter_left ~> Sublist.sublisteq_filter_left
      le_list_rev_drop_many ~> Sublist.sublisteq_rev_drop_many
      le_list_rev_take_iff ~> Sublist.sublisteq_append
      le_list_same_length ~> Sublist.sublisteq_same_length
      le_list_take_many_iff ~> Sublist.sublisteq_append'
      less_eq_list.drop ~> less_eq_list_drop
      less_eq_list.induct ~> less_eq_list_induct
      not_le_list_length ~> Sublist.not_sublisteq_length

  INCOMPATIBILITY.

* New theory Library/Countable_Set.

* Theory Library/Debug and Library/Parallel provide debugging and
parallel execution for code generated towards Isabelle/ML.

* Theory Library/FuncSet: Extended support for Pi and extensional and
introduce the extensional dependent function space "PiE".  Replaced
extensional_funcset by an abbreviation, and renamed lemmas from
extensional_funcset to PiE as follows:

    extensional_empty ~> PiE_empty
    extensional_funcset_empty_domain ~> PiE_empty_domain
    extensional_funcset_empty_range ~> PiE_empty_range
    extensional_funcset_arb ~> PiE_arb
    extensional_funcset_mem ~> PiE_mem
    extensional_funcset_extend_domainI ~> PiE_fun_upd
    extensional_funcset_restrict_domain ~> fun_upd_in_PiE
    extensional_funcset_extend_domain_eq ~> PiE_insert_eq
    card_extensional_funcset ~> card_PiE
    finite_extensional_funcset ~> finite_PiE

  INCOMPATIBILITY.

* Theory Library/FinFun: theory of almost everywhere constant
functions (supersedes the AFP entry "Code Generation for Functions as
Data").

* Theory Library/Phantom: generic phantom type to make a type
parameter appear in a constant's type.  This alternative to adding
TYPE('a) as another parameter avoids unnecessary closures in generated
code.

* Theory Library/RBT_Impl: efficient construction of red-black trees
from sorted associative lists.  Merging two trees with rbt_union may
return a structurally different tree than before.  Potential
INCOMPATIBILITY.

* Theory Library/IArray: immutable arrays with code generation.

* Theory Library/Finite_Lattice: theory of finite lattices.

* HOL/Multivariate_Analysis: replaced

  "basis :: 'a::euclidean_space => nat => real"
  "\<Chi>\<Chi> :: (nat => real) => 'a::euclidean_space"

on euclidean spaces by using the inner product "_ \<bullet> _" with
vectors from the Basis set: "\<Chi>\<Chi> i. f i" is superseded by
"SUM i : Basis. f i *\<^sub>R i".

  With this change the following constants are also changed or
  removed:

    DIM('a) :: nat ~> card (Basis :: 'a set) (is an abbreviation)
    a $$ i ~> inner a i (where i : Basis)
    cart_base i  removed
    \<pi>, \<pi>'  removed

  Theorems about these constants were removed.

  Renamed lemmas:

    component_le_norm ~> Basis_le_norm
    euclidean_eq ~> euclidean_eq_iff
    differential_zero_maxmin_component ~> differential_zero_maxmin_cart
    euclidean_simps ~> inner_simps
    independent_basis ~> independent_Basis
    span_basis ~> span_Basis
    in_span_basis ~> in_span_Basis
    norm_bound_component_le ~> norm_boound_Basis_le
    norm_bound_component_lt ~> norm_boound_Basis_lt
    component_le_infnorm ~> Basis_le_infnorm

  INCOMPATIBILITY.

* HOL/Probability:

  - Added simproc "measurable" to automatically prove measurability.
  - Added induction rules for sigma sets with disjoint union
    (sigma_sets_induct_disjoint) and for Borel-measurable functions
    (borel_measurable_induct).

  - Added the Daniell-Kolmogorov theorem (the existence of the limit
    of a projective family).

* HOL/Cardinals: Theories of ordinals and cardinals (supersedes the
AFP entry "Ordinals_and_Cardinals").

* HOL/BNF: New (co)datatype package based on bounded natural functors
with support for mixed, nested recursion and interesting non-free
datatypes.

* HOL/Finite_Set and Relation: added new set and relation operations
expressed by Finite_Set.fold.

* New theory HOL/Library/RBT_Set: implementation of sets by red-black
trees for the code generator.

* HOL/Library/RBT and HOL/Library/Mapping have been converted to
Lifting/Transfer.  Possible INCOMPATIBILITY.

* HOL/Set: renamed Set.project -> Set.filter.  INCOMPATIBILITY.


*** Document preparation ***

* Dropped legacy antiquotations "term_style" and "thm_style", since
styles may be given as arguments to "term" and "thm" already.
Discontinued legacy styles "prem1" .. "prem19".

* Default LaTeX rendering for \<euro> is now based on eurosym package,
instead of slightly exotic babel/greek.

* Document variant NAME may use different LaTeX entry point
document/root_NAME.tex if that file exists, instead of the common
document/root.tex.

* Simplified custom document/build script, instead of old-style
document/IsaMakefile.  Minor INCOMPATIBILITY.


*** ML ***

* The default limit for maximum number of worker threads is now 8,
instead of 4, in correspondence to capabilities of contemporary
hardware and Poly/ML runtime system.

* Type Seq.results and related operations support embedded error
messages within lazy enumerations, and thus allow to provide
informative errors in the absence of any usable results.

* Renamed Position.str_of to Position.here to emphasize that this is a
formal device to inline positions into message text, but not
necessarily printing visible text.


*** System ***

* Advanced support for Isabelle sessions and build management, see
"system" manual for the chapter of that name, especially the "isabelle
build" tool and its examples.  The "isabelle mkroot" tool prepares
session root directories for use with "isabelle build", similar to
former "isabelle mkdir" for "isabelle usedir".  Note that this affects
document preparation as well.  INCOMPATIBILITY, isabelle usedir /
mkdir / make are rendered obsolete.

* Discontinued obsolete Isabelle/build script, it is superseded by the
regular isabelle build tool.  For example:

  isabelle build -s -b HOL

* Discontinued obsolete "isabelle makeall".

* Discontinued obsolete IsaMakefile and ROOT.ML files from the
Isabelle distribution, except for rudimentary src/HOL/IsaMakefile that
provides some traditional targets that invoke "isabelle build".  Note
that this is inefficient!  Applications of Isabelle/HOL involving
"isabelle make" should be upgraded to use "isabelle build" directly.

* The "isabelle options" tool prints Isabelle system options, as
required for "isabelle build", for example.

* The "isabelle logo" tool produces EPS and PDF format simultaneously.
Minor INCOMPATIBILITY in command-line options.

* The "isabelle install" tool has now a simpler command-line.  Minor
INCOMPATIBILITY.

* The "isabelle components" tool helps to resolve add-on components
that are not bundled, or referenced from a bare-bones repository
version of Isabelle.

* Settings variable ISABELLE_PLATFORM_FAMILY refers to the general
platform family: "linux", "macos", "windows".
* The ML system is configured as regular component, and no longer
picked up from some surrounding directory.  Potential INCOMPATIBILITY
for home-made settings.

* Improved ML runtime statistics (heap, threads, future tasks etc.).

* Discontinued support for Poly/ML 5.2.1, which was the last version
without exception positions and advanced ML compiler/toplevel
configuration.

* Discontinued special treatment of Proof General -- no longer guess
PROOFGENERAL_HOME based on accidental file-system layout.  Minor
INCOMPATIBILITY: provide PROOFGENERAL_HOME and PROOFGENERAL_OPTIONS
settings manually, or use a Proof General version that has been
bundled as Isabelle component.



New in Isabelle2012 (May 2012)
------------------------------

*** General ***

* Prover IDE (PIDE) improvements:

  - more robust Sledgehammer integration (as before the sledgehammer
    command-line needs to be typed into the source buffer)
  - markup for bound variables
  - markup for types of term variables (displayed as tooltips)
  - support for user-defined Isar commands within the running session
  - improved support for Unicode outside original 16bit range,
    e.g. glyph for \<A> (thanks to jEdit 4.5.1)

* Forward declaration of outer syntax keywords within the theory
header -- minor INCOMPATIBILITY for user-defined commands.  Allow new
commands to be used in the same theory where defined.

* Auxiliary contexts indicate block structure for specifications with
additional parameters and assumptions.  Such unnamed contexts may be
nested within other targets, like 'theory', 'locale', 'class',
'instantiation' etc.  Results from the local context are generalized
accordingly and applied to the enclosing target context.  Example:

  context
    fixes x y z :: 'a
    assumes xy: "x = y" and yz: "y = z"
  begin

  lemma my_trans: "x = z" using xy yz by simp

  end

  thm my_trans

The most basic application is to factor-out context elements of
several fixes/assumes/shows theorem statements, e.g. see
~~/src/HOL/Isar_Examples/Group_Context.thy

Any other local theory specification element works within the "context
... begin ... end" block as well.

* Bundled declarations associate attributed fact expressions with a
given name in the context.  These may be later included in other
contexts.  This allows to manage context extensions casually, without
the logical dependencies of locales and locale interpretation.  See
commands 'bundle', 'include', 'including' etc. in the isar-ref manual,
and the sketch at the end of this section.

* Commands 'lemmas' and 'theorems' allow local variables using 'for'
declaration, and results are standardized before being stored.  Thus
old-style "standard" after instantiation or composition of facts
becomes obsolete.  Minor INCOMPATIBILITY, due to potential change of
indices of schematic variables.

* Rule attributes in local theory declarations (e.g. locale or class)
are now statically evaluated: the resulting theorem is stored instead
of the original expression.  INCOMPATIBILITY in rare situations, where
the historic accident of dynamic re-evaluation in interpretations
etc. was exploited.

* New tutorial "Programming and Proving in Isabelle/HOL"
("prog-prove").  It completely supersedes "A Tutorial Introduction to
Structured Isar Proofs" ("isar-overview"), which has been removed.  It
also supersedes "Isabelle/HOL, A Proof Assistant for Higher-Order
Logic" as the recommended beginners tutorial, but does not cover all
of the material of that old tutorial.

* Updated and extended reference manuals: "isar-ref",
"implementation", "system"; reduced remaining material in old "ref"
manual.
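A minimal sketch of 'bundle' and 'including' (the bundle name is a
hypothetical example; add_ac and mult_ac are existing HOL fact
collections):

  bundle ac_rules = add_ac [simp] mult_ac [simp]

  lemma "a + b + c = c + (b + (a::nat))"
    including ac_rules by simp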
*** Pure ***

* Command 'definition' no longer exports the foundational "raw_def"
into the user context.  Minor INCOMPATIBILITY, may use the regular
"def" result with attribute "abs_def" to imitate the old version.

* Attribute "abs_def" turns an equation of the form "f x y == t" into
"f == %x y. t", which ensures that "simp" or "unfold" steps always
expand it.  This also works for object-logic equality.  (Formerly
undocumented feature.)

* Sort constraints are now propagated in simultaneous statements, just
like type constraints.  INCOMPATIBILITY in rare situations, where
distinct sorts used to be assigned accidentally.  For example:

  lemma "P (x::'a::foo)" and "Q (y::'a::bar)"
    -- "now illegal"

  lemma "P (x::'a)" and "Q (y::'a::bar)"
    -- "now uniform 'a::bar instead of default sort for first
       occurrence (!)"

* Rule composition via attribute "OF" (or ML functions OF/MRS) is more
tolerant against multiple unifiers, as long as the final result is
unique.  (As before, rules are composed in canonical right-to-left
order to accommodate newly introduced premises.)

* Renamed some inner syntax categories:

    num ~> num_token
    xnum ~> xnum_token
    xstr ~> str_token

  Minor INCOMPATIBILITY.  Note that in practice "num_const" or
  "num_position" etc. are mainly used instead (which also include
  position information via constraints).

* Simplified configuration options for syntax ambiguity: see
"syntax_ambiguity_warning" and "syntax_ambiguity_limit" in isar-ref
manual.  Minor INCOMPATIBILITY.

* Discontinued configuration option "syntax_positions": atomic terms
in parse trees are always annotated by position constraints.

* Old code generator for SML and its commands 'code_module',
'code_library', 'consts_code', 'types_code' have been discontinued.
Use commands of the generic code generator instead.  INCOMPATIBILITY.

* Redundant attribute "code_inline" has been discontinued.  Use
"code_unfold" instead.  INCOMPATIBILITY.

* Dropped attribute "code_unfold_post" in favor of its dual
"code_abbrev", which yields a common pattern in definitions like

  definition [code_abbrev]: "f = t"

INCOMPATIBILITY.

* Obsolete 'types' command has been discontinued.  Use 'type_synonym'
instead.  INCOMPATIBILITY.

* Discontinued old "prems" fact, which used to refer to the accidental
collection of foundational premises in the context (already marked as
legacy since Isabelle2011).


*** HOL ***

* Type 'a set is now a proper type constructor (just as before
Isabelle2008).  Definitions mem_def and Collect_def have disappeared.
Non-trivial INCOMPATIBILITY.  For developments keeping predicates and
sets separate, it is often sufficient to rephrase some set S that has
been accidentally used as predicates by "%x. x : S", and some
predicate P that has been accidentally used as set by "{x. P x}".
Corresponding proofs in a first step should be pruned from any
tinkering with former theorems mem_def and Collect_def as far as
possible.

For developments which deliberately mix predicates and sets, a
planning step is necessary to determine what should become a predicate
and what a set.  It can be helpful to carry out that step in
Isabelle2011-1 before jumping right into the current release.

* Code generation by default implements sets as container type rather
than predicates.  INCOMPATIBILITY.

* New type synonym 'a rel = ('a * 'a) set

* The representation of numerals has changed.  Datatype "num"
represents strictly positive binary numerals, along with functions
"numeral :: num => 'a" and "neg_numeral :: num => 'a" to represent
positive and negated numeric literals, respectively.
See also definitions in ~~/src/HOL/Num.thy.  Potential
INCOMPATIBILITY, some user theories may require adaptations as
follows:

  - Theorems with number_ring or number_semiring constraints: These
    classes are gone; use comm_ring_1 or comm_semiring_1 instead.

  - Theories defining numeric types: Remove number, number_semiring,
    and number_ring instances.  Defer all theorems about numerals
    until after classes one and semigroup_add have been instantiated.

  - Numeral-only simp rules: Replace each rule having a "number_of v"
    pattern with two copies, one for numeral and one for neg_numeral.

  - Theorems about subclasses of semiring_1 or ring_1: These classes
    automatically support numerals now, so more simp rules and
    simprocs may now apply within the proof.

  - Definitions and theorems using old constructors Pls/Min/Bit0/Bit1:
    Redefine using other integer operations.

* Transfer: New package intended to generalize the existing
"descending" method and related theorem attributes from the Quotient
package.  (Not all functionality is implemented yet, but future
development will focus on Transfer as an eventual replacement for the
corresponding parts of the Quotient package.)

  - transfer_rule attribute: Maintains a collection of transfer rules,
    which relate constants at two different types.  Transfer rules may
    relate different type instances of the same polymorphic constant,
    or they may relate an operation on a raw type to a corresponding
    operation on an abstract type (quotient or subtype).  For example:

      ((A ===> B) ===> list_all2 A ===> list_all2 B) map map
      (cr_int ===> cr_int ===> cr_int)
        (%(x,y) (u,v). (x+u, y+v)) plus_int

  - transfer method: Replaces a subgoal on abstract types with an
    equivalent subgoal on the corresponding raw types.  Constants are
    replaced with corresponding ones according to the transfer rules.
    Goals are generalized over all free variables by default; this is
    necessary for variables whose types change, but can be overridden
    for specific variables with e.g. "transfer fixing: x y z".  The
    variant transfer' method allows replacing a subgoal with one that
    is logically stronger (rather than equivalent).

  - relator_eq attribute: Collects identity laws for relators of
    various type constructors, e.g. "list_all2 (op =) = (op =)".  The
    transfer method uses these lemmas to infer transfer rules for
    non-polymorphic constants on the fly.

  - transfer_prover method: Assists with proving a transfer rule for a
    new constant, provided the constant is defined in terms of other
    constants that already have transfer rules.  It should be applied
    after unfolding the constant definitions.

  - HOL/ex/Transfer_Int_Nat.thy: Example theory demonstrating transfer
    from type nat to type int.

* Lifting: New package intended to generalize the quotient_definition
facility of the Quotient package; designed to work with Transfer.

  - lift_definition command: Defines operations on an abstract type in
    terms of a corresponding operation on a representation type.
    Example syntax:

      lift_definition dlist_insert :: "'a => 'a dlist => 'a dlist"
        is List.insert

    Users must discharge a respectfulness proof obligation when each
    constant is defined.  (For a type copy, i.e. a typedef with UNIV,
    the proof is discharged automatically.)  The obligation is
    presented in a user-friendly, readable form; a respectfulness
    theorem in the standard format and a transfer rule are generated
    by the package.

  - Integration with code_abstype: For typedefs (e.g.
    subtypes corresponding to a datatype invariant, such as dlist),
    lift_definition generates a code certificate theorem and sets up
    code generation for each constant.

  - setup_lifting command: Sets up the Lifting package to work with a
    user-defined type.  The user must provide either a quotient
    theorem or a type_definition theorem.  The package configures
    transfer rules for equality and quantifiers on the type, and sets
    up the lift_definition command to work with the type.

  - Usage examples: See Quotient_Examples/Lift_DList.thy,
    Quotient_Examples/Lift_RBT.thy, Quotient_Examples/Lift_FSet.thy,
    Word/Word.thy and Library/Float.thy.

* Quotient package:

  - The 'quotient_type' command now supports a 'morphisms' option with
    rep and abs functions, similar to typedef.

  - 'quotient_type' sets up new types to work with the Lifting and
    Transfer packages, as with 'setup_lifting'.

  - The 'quotient_definition' command now requires the user to prove a
    respectfulness property at the point where the constant is
    defined, similar to lift_definition; INCOMPATIBILITY.

  - Renamed predicate 'Quotient' to 'Quotient3', and renamed theorems
    accordingly, INCOMPATIBILITY.

* New diagnostic command 'find_unused_assms' to find potentially
superfluous assumptions in theorems using Quickcheck.

* Quickcheck:

  - Quickcheck returns variable assignments as counterexamples, which
    allows to reveal the underspecification of functions under test.
    For example, refuting "hd xs = x", it presents the variable
    assignment xs = [] and x = a1 as a counterexample, assuming that
    any property is false whenever "hd []" occurs in it.

    These counterexamples are marked as potentially spurious, as
    Quickcheck also returns "xs = []" as a counterexample to the
    obvious theorem "hd xs = hd xs".

    After finding a potentially spurious counterexample, Quickcheck
    continues searching for genuine ones.

    By default, Quickcheck shows potentially spurious and genuine
    counterexamples.  The option "genuine_only" sets quickcheck to
    only show genuine counterexamples.

  - The command 'quickcheck_generator' creates random and exhaustive
    value generators for a given type and operations.  It generates
    values by using the operations as if they were constructors of
    that type.

  - Support for multisets.

  - Added "use_subtype" option.

  - Added "quickcheck_locale" configuration to specify how to process
    conjectures in a locale context.

* Nitpick: Fixed infinite loop caused by the 'peephole_optim' option
and affecting 'rat' and 'real'.

* Sledgehammer:

  - Integrated more tightly with SPASS, as described in the ITP 2012
    paper "More SPASS with Isabelle".
  - Made it try "smt" as a fallback if "metis" fails or times out.
  - Added support for the following provers: Alt-Ergo (via Why3 and
    TFF1), iProver, iProver-Eq.
  - Sped up the minimizer.
  - Added "lam_trans", "uncurry_aliases", and "minimize" options.
  - Renamed "slicing" ("no_slicing") option to "slice" ("dont_slice").
  - Renamed "sound" option to "strict".

* Metis: Added possibility to specify lambda translations scheme as a
parenthesized argument (e.g., "by (metis (lifting) ...)").

* SMT: Renamed "smt_fixed" option to "smt_read_only_certificates".

* Command 'try0': Renamed from 'try_methods'.  INCOMPATIBILITY.

* New "case_product" attribute to generate a case rule doing multiple
case distinctions at the same time.  E.g.

  list.exhaust [case_product nat.exhaust]

produces a rule which can be used to perform case distinction on both
a list and a nat.

* New "eventually_elim" method as a generalized variant of the
eventually_elim* rules.
Supports structured proofs.

* Typedef with implicit set definition is considered legacy.  Use
"typedef (open)" form instead, which will eventually become the
default.

* Record: code generation can be switched off manually with

  declare [[record_codegen = false]]  -- "default true"

* Datatype: type parameters allow explicit sort constraints.

* Concrete syntax for case expressions includes constraints for source
positions, and thus produces Prover IDE markup for its bindings.
INCOMPATIBILITY for old-style syntax translations that augment the
pattern notation; e.g. see src/HOL/HOLCF/One.thy for translations of
one_case.

* Clarified attribute "mono_set": pure declaration without modifying
the result of the fact expression.

* More default pred/set conversions on a couple of relation operations
and predicates.  Added powers of predicate relations.  Consolidation
of some relation theorems:

    converse_def ~> converse_unfold
    rel_comp_def ~> relcomp_unfold
    symp_def ~> (modified, use symp_def and sym_def instead)
    transp_def ~> transp_trans
    Domain_def ~> Domain_unfold
    Range_def ~> Domain_converse [symmetric]

  Generalized theorems INF_INT_eq, INF_INT_eq2, SUP_UN_eq, SUP_UN_eq2.

  See theory "Relation" for examples for making use of pred/set
  conversions by means of attributes "to_set" and "to_pred".

  INCOMPATIBILITY.

* Renamed facts about the power operation on relations, i.e., relpow
to match the constant's name:

    rel_pow_1 ~> relpow_1
    rel_pow_0_I ~> relpow_0_I
    rel_pow_Suc_I ~> relpow_Suc_I
    rel_pow_Suc_I2 ~> relpow_Suc_I2
    rel_pow_0_E ~> relpow_0_E
    rel_pow_Suc_E ~> relpow_Suc_E
    rel_pow_E ~> relpow_E
    rel_pow_Suc_D2 ~> relpow_Suc_D2
    rel_pow_Suc_E2 ~> relpow_Suc_E2
    rel_pow_Suc_D2' ~> relpow_Suc_D2'
    rel_pow_E2 ~> relpow_E2
    rel_pow_add ~> relpow_add
    rel_pow_commute ~> relpow_commute
    rel_pow_empty ~> relpow_empty
    rtrancl_imp_UN_rel_pow ~> rtrancl_imp_UN_relpow
    rel_pow_imp_rtrancl ~> relpow_imp_rtrancl
    rtrancl_is_UN_rel_pow ~> rtrancl_is_UN_relpow
    rtrancl_imp_rel_pow ~> rtrancl_imp_relpow
    rel_pow_fun_conv ~> relpow_fun_conv
    rel_pow_finite_bounded1 ~> relpow_finite_bounded1
    rel_pow_finite_bounded ~> relpow_finite_bounded
    rtrancl_finite_eq_rel_pow ~> rtrancl_finite_eq_relpow
    trancl_finite_eq_rel_pow ~> trancl_finite_eq_relpow
    single_valued_rel_pow ~> single_valued_relpow

  INCOMPATIBILITY.

* Theory Relation: Consolidated constant name for relation composition
and corresponding theorem names:

  - Renamed constant rel_comp to relcomp.

  - Dropped abbreviation pred_comp.  Use relcompp instead.

  - Renamed theorems:

      rel_compI ~> relcompI
      rel_compEpair ~> relcompEpair
      rel_compE ~> relcompE
      pred_comp_rel_comp_eq ~> relcompp_relcomp_eq
      rel_comp_empty1 ~> relcomp_empty1
      rel_comp_mono ~> relcomp_mono
      rel_comp_subset_Sigma ~> relcomp_subset_Sigma
      rel_comp_distrib ~> relcomp_distrib
      rel_comp_distrib2 ~> relcomp_distrib2
      rel_comp_UNION_distrib ~> relcomp_UNION_distrib
      rel_comp_UNION_distrib2 ~> relcomp_UNION_distrib2
      single_valued_rel_comp ~> single_valued_relcomp
      rel_comp_def ~> relcomp_unfold
      converse_rel_comp ~> converse_relcomp
      pred_compI ~> relcomppI
      pred_compE ~> relcomppE
      pred_comp_bot1 ~> relcompp_bot1
      pred_comp_bot2 ~> relcompp_bot2
      transp_pred_comp_less_eq ~> transp_relcompp_less_eq
      pred_comp_mono ~> relcompp_mono
      pred_comp_distrib ~> relcompp_distrib
      pred_comp_distrib2 ~> relcompp_distrib2
      converse_pred_comp ~> converse_relcompp
      finite_rel_comp ~> finite_relcomp
      set_rel_comp ~> set_relcomp

  INCOMPATIBILITY.

* Theory Divides: Discontinued redundant theorems about div and mod.
INCOMPATIBILITY, use the corresponding generic theorems instead.
DIVISION_BY_ZERO ~> div_by_0, mod_by_0 zdiv_self ~> div_self zmod_self ~> mod_self zdiv_zero ~> div_0 zmod_zero ~> mod_0 zdiv_zmod_equality ~> div_mod_equality2 zdiv_zmod_equality2 ~> div_mod_equality zmod_zdiv_trivial ~> mod_div_trivial zdiv_zminus_zminus ~> div_minus_minus zmod_zminus_zminus ~> mod_minus_minus zdiv_zminus2 ~> div_minus_right zmod_zminus2 ~> mod_minus_right zdiv_minus1_right ~> div_minus1_right zmod_minus1_right ~> mod_minus1_right zdvd_mult_div_cancel ~> dvd_mult_div_cancel zmod_zmult1_eq ~> mod_mult_right_eq zpower_zmod ~> power_mod zdvd_zmod ~> dvd_mod zdvd_zmod_imp_zdvd ~> dvd_mod_imp_dvd mod_mult_distrib ~> mult_mod_left mod_mult_distrib2 ~> mult_mod_right * Removed redundant theorems nat_mult_2 and nat_mult_2_right; use generic mult_2 and mult_2_right instead. INCOMPATIBILITY. * Finite_Set.fold now qualified. INCOMPATIBILITY. * Consolidated theorem names concerning fold combinators: inf_INFI_fold_inf ~> inf_INF_fold_inf sup_SUPR_fold_sup ~> sup_SUP_fold_sup INFI_fold_inf ~> INF_fold_inf SUPR_fold_sup ~> SUP_fold_sup union_set ~> union_set_fold minus_set ~> minus_set_fold INFI_set_fold ~> INF_set_fold SUPR_set_fold ~> SUP_set_fold INF_code ~> INF_set_foldr SUP_code ~> SUP_set_foldr foldr.simps ~> foldr.simps (in point-free formulation) foldr_fold_rev ~> foldr_conv_fold foldl_fold ~> foldl_conv_fold foldr_foldr ~> foldr_conv_foldl foldl_foldr ~> foldl_conv_foldr fold_set_remdups ~> fold_set_fold_remdups fold_set ~> fold_set_fold fold1_set ~> fold1_set_fold INCOMPATIBILITY. * Dropped rarely useful theorems concerning fold combinators: foldl_apply, foldl_fun_comm, foldl_rev, fold_weak_invariant, rev_foldl_cons, fold_set_remdups, fold_set, fold_set1, concat_conv_foldl, foldl_weak_invariant, foldl_invariant, foldr_invariant, foldl_absorb0, foldl_foldr1_lemma, foldl_foldr1, listsum_conv_fold, listsum_foldl, sort_foldl_insort, foldl_assoc, foldr_conv_foldl, start_le_sum, elem_le_sum, sum_eq_0_conv. INCOMPATIBILITY. For the common phrases "%xs. List.foldr plus xs 0" and "List.foldl plus 0", prefer "List.listsum". Otherwise it can be useful to boil down "List.foldr" and "List.foldl" to "List.fold" by unfolding "foldr_conv_fold" and "foldl_conv_fold". * Dropped lemmas minus_set_foldr, union_set_foldr, union_coset_foldr, inter_coset_foldr, Inf_fin_set_foldr, Sup_fin_set_foldr, Min_fin_set_foldr, Max_fin_set_foldr, Inf_set_foldr, Sup_set_foldr, INF_set_foldr, SUP_set_foldr. INCOMPATIBILITY. Prefer corresponding lemmas over fold rather than foldr, or make use of lemmas fold_conv_foldr and fold_rev. * Congruence rules Option.map_cong and Option.bind_cong for recursion through option types. * "Transitive_Closure.ntrancl": bounded transitive closure on relations. * Constant "Set.not_member" now qualified. INCOMPATIBILITY. * Theory Int: Discontinued many legacy theorems specific to type int. INCOMPATIBILITY, use the corresponding generic theorems instead. 
  zminus_zminus ~> minus_minus
  zminus_0 ~> minus_zero
  zminus_zadd_distrib ~> minus_add_distrib
  zadd_commute ~> add_commute
  zadd_assoc ~> add_assoc
  zadd_left_commute ~> add_left_commute
  zadd_ac ~> add_ac
  zmult_ac ~> mult_ac
  zadd_0 ~> add_0_left
  zadd_0_right ~> add_0_right
  zadd_zminus_inverse2 ~> left_minus
  zmult_zminus ~> mult_minus_left
  zmult_commute ~> mult_commute
  zmult_assoc ~> mult_assoc
  zadd_zmult_distrib ~> left_distrib
  zadd_zmult_distrib2 ~> right_distrib
  zdiff_zmult_distrib ~> left_diff_distrib
  zdiff_zmult_distrib2 ~> right_diff_distrib
  zmult_1 ~> mult_1_left
  zmult_1_right ~> mult_1_right
  zle_refl ~> order_refl
  zle_trans ~> order_trans
  zle_antisym ~> order_antisym
  zle_linear ~> linorder_linear
  zless_linear ~> linorder_less_linear
  zadd_left_mono ~> add_left_mono
  zadd_strict_right_mono ~> add_strict_right_mono
  zadd_zless_mono ~> add_less_le_mono
  int_0_less_1 ~> zero_less_one
  int_0_neq_1 ~> zero_neq_one
  zless_le ~> less_le
  zpower_zadd_distrib ~> power_add
  zero_less_zpower_abs_iff ~> zero_less_power_abs_iff
  zero_le_zpower_abs ~> zero_le_power_abs

* Theory Deriv: Renamed

  DERIV_nonneg_imp_nonincreasing ~> DERIV_nonneg_imp_nondecreasing

* Theory Library/Multiset: Improved code generation of multisets.

* Theory HOL/Library/Set_Algebras: Addition and multiplication on sets are expressed via type classes again. The special syntax \<oplus>/\<otimes> has been replaced by plain +/*. Removed constant setsum_set, which is now subsumed by Big_Operators.setsum. INCOMPATIBILITY.

* Theory HOL/Library/Diagonalize has been removed. INCOMPATIBILITY, use theory HOL/Library/Nat_Bijection instead.

* Theory HOL/Library/RBT_Impl: Backing implementation of red-black trees is now inside a type class context. Names of affected operations and lemmas have been prefixed by rbt_. INCOMPATIBILITY for theories working directly with raw red-black trees, adapt the names as follows:

  Operations:
  bulkload -> rbt_bulkload
  del_from_left -> rbt_del_from_left
  del_from_right -> rbt_del_from_right
  del -> rbt_del
  delete -> rbt_delete
  ins -> rbt_ins
  insert -> rbt_insert
  insertw -> rbt_insert_with
  insert_with_key -> rbt_insert_with_key
  map_entry -> rbt_map_entry
  lookup -> rbt_lookup
  sorted -> rbt_sorted
  tree_greater -> rbt_greater
  tree_less -> rbt_less
  tree_less_symbol -> rbt_less_symbol
  union -> rbt_union
  union_with -> rbt_union_with
  union_with_key -> rbt_union_with_key

  Lemmas:
  balance_left_sorted -> balance_left_rbt_sorted
  balance_left_tree_greater -> balance_left_rbt_greater
  balance_left_tree_less -> balance_left_rbt_less
  balance_right_sorted -> balance_right_rbt_sorted
  balance_right_tree_greater -> balance_right_rbt_greater
  balance_right_tree_less -> balance_right_rbt_less
  balance_sorted -> balance_rbt_sorted
  balance_tree_greater -> balance_rbt_greater
  balance_tree_less -> balance_rbt_less
  bulkload_is_rbt -> rbt_bulkload_is_rbt
  combine_sorted -> combine_rbt_sorted
  combine_tree_greater -> combine_rbt_greater
  combine_tree_less -> combine_rbt_less
  delete_in_tree -> rbt_delete_in_tree
  delete_is_rbt -> rbt_delete_is_rbt
  del_from_left_tree_greater -> rbt_del_from_left_rbt_greater
  del_from_left_tree_less -> rbt_del_from_left_rbt_less
  del_from_right_tree_greater -> rbt_del_from_right_rbt_greater
  del_from_right_tree_less -> rbt_del_from_right_rbt_less
  del_in_tree -> rbt_del_in_tree
  del_inv1_inv2 -> rbt_del_inv1_inv2
  del_sorted -> rbt_del_rbt_sorted
  del_tree_greater -> rbt_del_rbt_greater
  del_tree_less -> rbt_del_rbt_less
  dom_lookup_Branch -> dom_rbt_lookup_Branch
  entries_lookup -> entries_rbt_lookup
  finite_dom_lookup -> finite_dom_rbt_lookup
  insert_sorted -> rbt_insert_rbt_sorted
  insertw_is_rbt -> rbt_insertw_is_rbt
  insertwk_is_rbt -> rbt_insertwk_is_rbt
  insertwk_sorted -> rbt_insertwk_rbt_sorted
  insertw_sorted -> rbt_insertw_rbt_sorted
  ins_sorted -> ins_rbt_sorted
  ins_tree_greater -> ins_rbt_greater
  ins_tree_less -> ins_rbt_less
  is_rbt_sorted -> is_rbt_rbt_sorted
  lookup_balance -> rbt_lookup_balance
  lookup_bulkload -> rbt_lookup_rbt_bulkload
  lookup_delete -> rbt_lookup_rbt_delete
  lookup_Empty -> rbt_lookup_Empty
  lookup_from_in_tree -> rbt_lookup_from_in_tree
  lookup_in_tree -> rbt_lookup_in_tree
  lookup_ins -> rbt_lookup_ins
  lookup_insert -> rbt_lookup_rbt_insert
  lookup_insertw -> rbt_lookup_rbt_insertw
  lookup_insertwk -> rbt_lookup_rbt_insertwk
  lookup_keys -> rbt_lookup_keys
  lookup_map -> rbt_lookup_map
  lookup_map_entry -> rbt_lookup_rbt_map_entry
  lookup_tree_greater -> rbt_lookup_rbt_greater
  lookup_tree_less -> rbt_lookup_rbt_less
  lookup_union -> rbt_lookup_rbt_union
  map_entry_color_of -> rbt_map_entry_color_of
  map_entry_inv1 -> rbt_map_entry_inv1
  map_entry_inv2 -> rbt_map_entry_inv2
  map_entry_is_rbt -> rbt_map_entry_is_rbt
  map_entry_sorted -> rbt_map_entry_rbt_sorted
  map_entry_tree_greater -> rbt_map_entry_rbt_greater
  map_entry_tree_less -> rbt_map_entry_rbt_less
  map_tree_greater -> map_rbt_greater
  map_tree_less -> map_rbt_less
  map_sorted -> map_rbt_sorted
  paint_sorted -> paint_rbt_sorted
  paint_lookup -> paint_rbt_lookup
  paint_tree_greater -> paint_rbt_greater
  paint_tree_less -> paint_rbt_less
  sorted_entries -> rbt_sorted_entries
  tree_greater_eq_trans -> rbt_greater_eq_trans
  tree_greater_nit -> rbt_greater_nit
  tree_greater_prop -> rbt_greater_prop
  tree_greater_simps -> rbt_greater_simps
  tree_greater_trans -> rbt_greater_trans
  tree_less_eq_trans -> rbt_less_eq_trans
  tree_less_nit -> rbt_less_nit
  tree_less_prop -> rbt_less_prop
  tree_less_simps -> rbt_less_simps
  tree_less_trans -> rbt_less_trans
  tree_ord_props -> rbt_ord_props
  union_Branch -> rbt_union_Branch
  union_is_rbt -> rbt_union_is_rbt
  unionw_is_rbt -> rbt_unionw_is_rbt
  unionwk_is_rbt -> rbt_unionwk_is_rbt
  unionwk_sorted -> rbt_unionwk_rbt_sorted

* Theory HOL/Library/Float: Floating point numbers are now defined as a subset of the real numbers. All operations are defined using the lifting framework and proofs use the transfer method. INCOMPATIBILITY.
Changed Operations: float_abs -> abs float_nprt -> nprt float_pprt -> pprt pow2 -> use powr round_down -> float_round_down round_up -> float_round_up scale -> exponent Removed Operations: ceiling_fl, lb_mult, lb_mod, ub_mult, ub_mod Renamed Lemmas: abs_float_def -> Float.compute_float_abs bitlen_ge0 -> bitlen_nonneg bitlen.simps -> Float.compute_bitlen float_components -> Float_mantissa_exponent float_divl.simps -> Float.compute_float_divl float_divr.simps -> Float.compute_float_divr float_eq_odd -> mult_powr_eq_mult_powr_iff float_power -> real_of_float_power lapprox_posrat_def -> Float.compute_lapprox_posrat lapprox_rat.simps -> Float.compute_lapprox_rat le_float_def' -> Float.compute_float_le le_float_def -> less_eq_float.rep_eq less_float_def' -> Float.compute_float_less less_float_def -> less_float.rep_eq normfloat_def -> Float.compute_normfloat normfloat_imp_odd_or_zero -> mantissa_not_dvd and mantissa_noteq_0 normfloat -> normfloat_def normfloat_unique -> use normfloat_def number_of_float_Float -> Float.compute_float_numeral, Float.compute_float_neg_numeral one_float_def -> Float.compute_float_one plus_float_def -> Float.compute_float_plus rapprox_posrat_def -> Float.compute_rapprox_posrat rapprox_rat.simps -> Float.compute_rapprox_rat real_of_float_0 -> zero_float.rep_eq real_of_float_1 -> one_float.rep_eq real_of_float_abs -> abs_float.rep_eq real_of_float_add -> plus_float.rep_eq real_of_float_minus -> uminus_float.rep_eq real_of_float_mult -> times_float.rep_eq real_of_float_simp -> Float.rep_eq real_of_float_sub -> minus_float.rep_eq round_down.simps -> Float.compute_float_round_down round_up.simps -> Float.compute_float_round_up times_float_def -> Float.compute_float_times uminus_float_def -> Float.compute_float_uminus zero_float_def -> Float.compute_float_zero Lemmas not necessary anymore, use the transfer method: bitlen_B0, bitlen_B1, bitlen_ge1, bitlen_Min, bitlen_Pls, float_divl, float_divr, float_le_simp, float_less1_mantissa_bound, float_less_simp, float_less_zero, float_le_zero, float_pos_less1_e_neg, float_pos_m_pos, float_split, float_split2, floor_pos_exp, lapprox_posrat, lapprox_posrat_bottom, lapprox_rat, lapprox_rat_bottom, normalized_float, rapprox_posrat, rapprox_posrat_le1, rapprox_rat, real_of_float_ge0_exp, real_of_float_neg_exp, real_of_float_nge0_exp, round_down floor_fl, round_up, zero_le_float, zero_less_float * New theory HOL/Library/DAList provides an abstract type for association lists with distinct keys. * Session HOL/IMP: Added new theory of abstract interpretation of annotated commands. * Session HOL-Import: Re-implementation from scratch is faster, simpler, and more scalable. Requires a proof bundle, which is available as an external component. Discontinued old (and mostly dead) Importer for HOL4 and HOL Light. INCOMPATIBILITY. * Session HOL-Word: Discontinued many redundant theorems specific to type 'a word. INCOMPATIBILITY, use the corresponding generic theorems instead. 
  word_sub_alt ~> word_sub_wi
  word_add_alt ~> word_add_def
  word_mult_alt ~> word_mult_def
  word_minus_alt ~> word_minus_def
  word_0_alt ~> word_0_wi
  word_1_alt ~> word_1_wi
  word_add_0 ~> add_0_left
  word_add_0_right ~> add_0_right
  word_mult_1 ~> mult_1_left
  word_mult_1_right ~> mult_1_right
  word_add_commute ~> add_commute
  word_add_assoc ~> add_assoc
  word_add_left_commute ~> add_left_commute
  word_mult_commute ~> mult_commute
  word_mult_assoc ~> mult_assoc
  word_mult_left_commute ~> mult_left_commute
  word_left_distrib ~> left_distrib
  word_right_distrib ~> right_distrib
  word_left_minus ~> left_minus
  word_diff_0_right ~> diff_0_right
  word_diff_self ~> diff_self
  word_sub_def ~> diff_minus
  word_diff_minus ~> diff_minus
  word_add_ac ~> add_ac
  word_mult_ac ~> mult_ac
  word_plus_ac0 ~> add_0_left add_0_right add_ac
  word_times_ac1 ~> mult_1_left mult_1_right mult_ac
  word_order_trans ~> order_trans
  word_order_refl ~> order_refl
  word_order_antisym ~> order_antisym
  word_order_linear ~> linorder_linear
  lenw1_zero_neq_one ~> zero_neq_one
  word_number_of_eq ~> number_of_eq
  word_of_int_add_hom ~> wi_hom_add
  word_of_int_sub_hom ~> wi_hom_sub
  word_of_int_mult_hom ~> wi_hom_mult
  word_of_int_minus_hom ~> wi_hom_neg
  word_of_int_succ_hom ~> wi_hom_succ
  word_of_int_pred_hom ~> wi_hom_pred
  word_of_int_0_hom ~> word_0_wi
  word_of_int_1_hom ~> word_1_wi

* Session HOL-Word: New proof method "word_bitwise" for splitting machine word equalities and inequalities into logical circuits, defined in HOL/Word/WordBitwise.thy. Supports addition, subtraction, multiplication, shifting by constants, bitwise operators and numeric constants. Requires fixed-length word types, not 'a word. Solves many standard word identities outright and converts more into first order problems amenable to blast or similar. See also examples in HOL/Word/Examples/WordExamples.thy.

* Session HOL-Probability: Introduced the type "'a measure" to represent measures; this replaces the records 'a algebra and 'a measure_space. The locales based on subset_class now have two locale parameters: the space \<Omega> and the set of measurable sets M. The product of probability spaces now uses the same constant as the finite product of sigma-finite measure spaces, "PiM :: ('i => 'a) measure". Most constants are now defined outside of locales and gain an additional parameter, like null_sets, almost_eventually or \<mu>'. Measure space constructions for distributions and densities now have their own constants distr and density. Instead of using locales to describe measure spaces with a finite space, the measures count_space and point_measure are introduced. INCOMPATIBILITY.
  Renamed constants:
  measure -> emeasure
  finite_measure.\<mu>' -> measure
  product_algebra_generator -> prod_algebra
  product_prob_space.emb -> prod_emb
  product_prob_space.infprod_algebra -> PiM

  Removed locales:
  completeable_measure_space
  finite_measure_space
  finite_prob_space
  finite_product_finite_prob_space
  finite_product_sigma_algebra
  finite_sigma_algebra
  measure_space
  pair_finite_prob_space
  pair_finite_sigma_algebra
  pair_finite_space
  pair_sigma_algebra
  product_sigma_algebra

  Removed constants:
  conditional_space
  distribution -> use distr measure, or distributed predicate
  image_space
  joint_distribution -> use distr measure, or distributed predicate
  pair_measure_generator
  product_prob_space.infprod_algebra -> use PiM
  subvimage

  Replacement theorems:
  finite_additivity_sufficient -> ring_of_sets.countably_additiveI_finite
  finite_measure.empty_measure -> measure_empty
  finite_measure.finite_continuity_from_above -> finite_measure.finite_Lim_measure_decseq
  finite_measure.finite_continuity_from_below -> finite_measure.finite_Lim_measure_incseq
  finite_measure.finite_measure_countably_subadditive -> finite_measure.finite_measure_subadditive_countably
  finite_measure.finite_measure_eq -> finite_measure.emeasure_eq_measure
  finite_measure.finite_measure -> finite_measure.emeasure_finite
  finite_measure.finite_measure_finite_singleton -> finite_measure.finite_measure_eq_setsum_singleton
  finite_measure.positive_measure' -> measure_nonneg
  finite_measure.real_measure -> finite_measure.emeasure_real
  finite_product_prob_space.finite_measure_times -> finite_product_prob_space.finite_measure_PiM_emb
  finite_product_sigma_algebra.in_P -> sets_PiM_I_finite
  finite_product_sigma_algebra.P_empty -> space_PiM_empty, sets_PiM_empty
  information_space.conditional_entropy_eq -> information_space.conditional_entropy_simple_distributed
  information_space.conditional_entropy_positive -> information_space.conditional_entropy_nonneg_simple
  information_space.conditional_mutual_information_eq_mutual_information -> information_space.conditional_mutual_information_eq_mutual_information_simple
  information_space.conditional_mutual_information_generic_positive -> information_space.conditional_mutual_information_nonneg_simple
  information_space.conditional_mutual_information_positive -> information_space.conditional_mutual_information_nonneg_simple
  information_space.entropy_commute -> information_space.entropy_commute_simple
  information_space.entropy_eq -> information_space.entropy_simple_distributed
  information_space.entropy_generic_eq -> information_space.entropy_simple_distributed
  information_space.entropy_positive -> information_space.entropy_nonneg_simple
  information_space.entropy_uniform_max -> information_space.entropy_uniform
  information_space.KL_eq_0_imp -> information_space.KL_eq_0_iff_eq
  information_space.KL_eq_0 -> information_space.KL_same_eq_0
  information_space.KL_ge_0 -> information_space.KL_nonneg
  information_space.mutual_information_eq -> information_space.mutual_information_simple_distributed
  information_space.mutual_information_positive -> information_space.mutual_information_nonneg_simple
  Int_stable_cuboids -> Int_stable_atLeastAtMost
  Int_stable_product_algebra_generator -> positive_integral
  measure_preserving -> equality "distr M N f = N" "f : measurable M N"
  measure_space.additive -> emeasure_additive
  measure_space.AE_iff_null_set -> AE_iff_null
  measure_space.almost_everywhere_def -> eventually_ae_filter
  measure_space.almost_everywhere_vimage -> AE_distrD
  measure_space.continuity_from_above -> INF_emeasure_decseq
measure_space.continuity_from_above_Lim -> Lim_emeasure_decseq measure_space.continuity_from_below_Lim -> Lim_emeasure_incseq measure_space.continuity_from_below -> SUP_emeasure_incseq measure_space_density -> emeasure_density measure_space.density_is_absolutely_continuous -> absolutely_continuousI_density measure_space.integrable_vimage -> integrable_distr measure_space.integral_translated_density -> integral_density measure_space.integral_vimage -> integral_distr measure_space.measure_additive -> plus_emeasure measure_space.measure_compl -> emeasure_compl measure_space.measure_countable_increasing -> emeasure_countable_increasing measure_space.measure_countably_subadditive -> emeasure_subadditive_countably measure_space.measure_decseq -> decseq_emeasure measure_space.measure_Diff -> emeasure_Diff measure_space.measure_Diff_null_set -> emeasure_Diff_null_set measure_space.measure_eq_0 -> emeasure_eq_0 measure_space.measure_finitely_subadditive -> emeasure_subadditive_finite measure_space.measure_finite_singleton -> emeasure_eq_setsum_singleton measure_space.measure_incseq -> incseq_emeasure measure_space.measure_insert -> emeasure_insert measure_space.measure_mono -> emeasure_mono measure_space.measure_not_negative -> emeasure_not_MInf measure_space.measure_preserving_Int_stable -> measure_eqI_generator_eq measure_space.measure_setsum -> setsum_emeasure measure_space.measure_setsum_split -> setsum_emeasure_cover measure_space.measure_space_vimage -> emeasure_distr measure_space.measure_subadditive_finite -> emeasure_subadditive_finite measure_space.measure_subadditive -> subadditive measure_space.measure_top -> emeasure_space measure_space.measure_UN_eq_0 -> emeasure_UN_eq_0 measure_space.measure_Un_null_set -> emeasure_Un_null_set measure_space.positive_integral_translated_density -> positive_integral_density measure_space.positive_integral_vimage -> positive_integral_distr measure_space.real_continuity_from_above -> Lim_measure_decseq measure_space.real_continuity_from_below -> Lim_measure_incseq measure_space.real_measure_countably_subadditive -> measure_subadditive_countably measure_space.real_measure_Diff -> measure_Diff measure_space.real_measure_finite_Union -> measure_finite_Union measure_space.real_measure_setsum_singleton -> measure_eq_setsum_singleton measure_space.real_measure_subadditive -> measure_subadditive measure_space.real_measure_Union -> measure_Union measure_space.real_measure_UNION -> measure_UNION measure_space.simple_function_vimage -> simple_function_comp measure_space.simple_integral_vimage -> simple_integral_distr measure_space.simple_integral_vimage -> simple_integral_distr measure_unique_Int_stable -> measure_eqI_generator_eq measure_unique_Int_stable_vimage -> measure_eqI_generator_eq pair_sigma_algebra.measurable_cut_fst -> sets_Pair1 pair_sigma_algebra.measurable_cut_snd -> sets_Pair2 pair_sigma_algebra.measurable_pair_image_fst -> measurable_Pair1 pair_sigma_algebra.measurable_pair_image_snd -> measurable_Pair2 pair_sigma_algebra.measurable_product_swap -> measurable_pair_swap_iff pair_sigma_algebra.pair_sigma_algebra_measurable -> measurable_pair_swap pair_sigma_algebra.pair_sigma_algebra_swap_measurable -> measurable_pair_swap' pair_sigma_algebra.sets_swap -> sets_pair_swap pair_sigma_finite.measure_cut_measurable_fst -> pair_sigma_finite.measurable_emeasure_Pair1 pair_sigma_finite.measure_cut_measurable_snd -> pair_sigma_finite.measurable_emeasure_Pair2 pair_sigma_finite.measure_preserving_swap -> pair_sigma_finite.distr_pair_swap 
pair_sigma_finite.pair_measure_alt2 -> pair_sigma_finite.emeasure_pair_measure_alt2 pair_sigma_finite.pair_measure_alt -> pair_sigma_finite.emeasure_pair_measure_alt pair_sigma_finite.pair_measure_times -> pair_sigma_finite.emeasure_pair_measure_Times prob_space.indep_distribution_eq_measure -> prob_space.indep_vars_iff_distr_eq_PiM prob_space.indep_var_distributionD -> prob_space.indep_var_distribution_eq prob_space.measure_space_1 -> prob_space.emeasure_space_1 prob_space.prob_space_vimage -> prob_space_distr prob_space.random_variable_restrict -> measurable_restrict prob_space_unique_Int_stable -> measure_eqI_prob_space product_algebraE -> prod_algebraE_all product_algebra_generator_der -> prod_algebra_eq_finite product_algebra_generator_into_space -> prod_algebra_sets_into_space product_algebraI -> sets_PiM_I_finite product_measure_exists -> product_sigma_finite.sigma_finite product_prob_space.finite_index_eq_finite_product -> product_prob_space.sets_PiM_generator product_prob_space.finite_measure_infprod_emb_Pi -> product_prob_space.measure_PiM_emb product_prob_space.infprod_spec -> product_prob_space.emeasure_PiM_emb_not_empty product_prob_space.measurable_component -> measurable_component_singleton product_prob_space.measurable_emb -> measurable_prod_emb product_prob_space.measurable_into_infprod_algebra -> measurable_PiM_single product_prob_space.measurable_singleton_infprod -> measurable_component_singleton product_prob_space.measure_emb -> emeasure_prod_emb product_prob_space.measure_preserving_restrict -> product_prob_space.distr_restrict product_sigma_algebra.product_algebra_into_space -> space_closed product_sigma_finite.measure_fold -> product_sigma_finite.distr_merge product_sigma_finite.measure_preserving_component_singelton -> product_sigma_finite.distr_singleton product_sigma_finite.measure_preserving_merge -> product_sigma_finite.distr_merge sequence_space.measure_infprod -> sequence_space.measure_PiM_countable sets_product_algebra -> sets_PiM sigma_algebra.measurable_sigma -> measurable_measure_of sigma_finite_measure.disjoint_sigma_finite -> sigma_finite_disjoint sigma_finite_measure.RN_deriv_vimage -> sigma_finite_measure.RN_deriv_distr sigma_product_algebra_sigma_eq -> sigma_prod_algebra_sigma_eq space_product_algebra -> space_PiM * Session HOL-TPTP: support to parse and import TPTP problems (all languages) into Isabelle/HOL. *** FOL *** * New "case_product" attribute (see HOL). *** ZF *** * Greater support for structured proofs involving induction or case analysis. * Much greater use of mathematical symbols. * Removal of many ML theorem bindings. INCOMPATIBILITY. *** ML *** * Antiquotation @{keyword "name"} produces a parser for outer syntax from a minor keyword introduced via theory header declaration. * Antiquotation @{command_spec "name"} produces the Outer_Syntax.command_spec from a major keyword introduced via theory header declaration; it can be passed to Outer_Syntax.command etc. * Local_Theory.define no longer hard-wires default theorem name "foo_def", but retains the binding as given. If that is Binding.empty / Attrib.empty_binding, the result is not registered as user-level fact. The Local_Theory.define_internal variant allows to specify a non-empty name (used for the foundation in the background theory), while omitting the fact binding in the user-context. Potential INCOMPATIBILITY for derived definitional packages: need to specify naming policy for primitive definitions more explicitly. 
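  For illustration, a minimal sketch (hypothetical constant name "c", not part of the distribution) of a primitive definition with an explicit fact binding; with Attrib.empty_binding in place of the "c_def" binding, the result would not be registered as a user-level fact:

    local_setup {*
      (* sketch: define constant "c" with fact binding "c_def" *)
      Local_Theory.define
        ((@{binding c}, NoSyn), ((@{binding c_def}, []), @{term True}))
      #> snd
    *}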
* Renamed Thm.capply to Thm.apply, and Thm.cabs to Thm.lambda in conformance with similar operations in structure Term and Logic. * Antiquotation @{attributes [...]} embeds attribute source representation into the ML text, which is particularly useful with declarations like Local_Theory.note. * Structure Proof_Context follows standard naming scheme. Old ProofContext has been discontinued. INCOMPATIBILITY. * Refined Local_Theory.declaration {syntax, pervasive}, with subtle change of semantics: update is applied to auxiliary local theory context as well. * Modernized some old-style infix operations: addeqcongs ~> Simplifier.add_eqcong deleqcongs ~> Simplifier.del_eqcong addcongs ~> Simplifier.add_cong delcongs ~> Simplifier.del_cong setmksimps ~> Simplifier.set_mksimps setmkcong ~> Simplifier.set_mkcong setmksym ~> Simplifier.set_mksym setmkeqTrue ~> Simplifier.set_mkeqTrue settermless ~> Simplifier.set_termless setsubgoaler ~> Simplifier.set_subgoaler addsplits ~> Splitter.add_split delsplits ~> Splitter.del_split *** System *** * USER_HOME settings variable points to cross-platform user home directory, which coincides with HOME on POSIX systems only. Likewise, the Isabelle path specification "~" now expands to $USER_HOME, instead of former $HOME. A different default for USER_HOME may be set explicitly in shell environment, before Isabelle settings are evaluated. Minor INCOMPATIBILITY: need to adapt Isabelle path where the generic user home was intended. * ISABELLE_HOME_WINDOWS refers to ISABELLE_HOME in windows file name notation, which is useful for the jEdit file browser, for example. * ISABELLE_JDK_HOME settings variable points to JDK with javac and jar (not just JRE). New in Isabelle2011-1 (October 2011) ------------------------------------ *** General *** * Improved Isabelle/jEdit Prover IDE (PIDE), which can be invoked as "isabelle jedit" or "ISABELLE_HOME/Isabelle" on the command line. - Management of multiple theory files directly from the editor buffer store -- bypassing the file-system (no requirement to save files for checking). - Markup of formal entities within the text buffer, with semantic highlighting, tooltips and hyperlinks to jump to defining source positions. - Improved text rendering, with sub/superscripts in the source buffer (including support for copy/paste wrt. output panel, HTML theory output and other non-Isabelle text boxes). - Refined scheduling of proof checking and printing of results, based on interactive editor view. (Note: jEdit folding and narrowing allows to restrict buffer perspectives explicitly.) - Reduced CPU performance requirements, usable on machines with few cores. - Reduced memory requirements due to pruning of unused document versions (garbage collection). See also ~~/src/Tools/jEdit/README.html for further information, including some remaining limitations. * Theory loader: source files are exclusively located via the master directory of each theory node (where the .thy file itself resides). The global load path (such as src/HOL/Library) has been discontinued. Note that the path element ~~ may be used to reference theories in the Isabelle home folder -- for instance, "~~/src/HOL/Library/FuncSet". INCOMPATIBILITY. * Theory loader: source files are identified by content via SHA1 digests. Discontinued former path/modtime identification and optional ISABELLE_FILE_IDENT plugin scripts. * Parallelization of nested Isar proofs is subject to Goal.parallel_proofs_threshold (default 100). See also isabelle usedir option -Q. 
* Name space: former unsynchronized references are now proper configuration options, with more conventional names:

  long_names ~> names_long
  short_names ~> names_short
  unique_names ~> names_unique

Minor INCOMPATIBILITY, need to declare options in context like this:

  declare [[names_unique = false]]

* Literal facts `prop` may contain dummy patterns, e.g. `_ = _`. Note that the result needs to be unique, which means fact specifications may have to be refined after enriching a proof context.

* Attribute "case_names" has been refined: the assumptions in each case can now be named by following the case name with [name1 name2 ...].

* Isabelle/Isar reference manual has been updated and extended:
  - "Synopsis" provides a catalog of main Isar language concepts.
  - Formal references in syntax diagrams, via @{rail} antiquotation.
  - Updated material from classic "ref" manual, notably about "Classical Reasoner".

*** HOL ***

* Class bot and top require underlying partial order rather than preorder: uniqueness of bot and top is guaranteed. INCOMPATIBILITY.

* Class complete_lattice: generalized a couple of lemmas from sets; generalized theorems INF_cong and SUP_cong. New type classes for complete boolean algebras and complete linear orders. Lemmas Inf_less_iff, less_Sup_iff, INF_less_iff, less_SUP_iff now reside in class complete_linorder.

Changed proposition of lemmas Inf_bool_def, Sup_bool_def, Inf_fun_def, Sup_fun_def, Inf_apply, Sup_apply.

Removed redundant lemmas (the right hand side gives hints how to replace them for (metis ...), or (simp only: ...) proofs):

  Inf_singleton ~> Inf_insert [where A="{}", unfolded Inf_empty inf_top_right]
  Sup_singleton ~> Sup_insert [where A="{}", unfolded Sup_empty sup_bot_right]
  Inf_binary ~> Inf_insert, Inf_empty, and inf_top_right
  Sup_binary ~> Sup_insert, Sup_empty, and sup_bot_right
  Int_eq_Inter ~> Inf_insert, Inf_empty, and inf_top_right
  Un_eq_Union ~> Sup_insert, Sup_empty, and sup_bot_right
  Inter_def ~> INF_def, image_def
  Union_def ~> SUP_def, image_def
  INT_eq ~> INF_def, and image_def
  UN_eq ~> SUP_def, and image_def
  INF_subset ~> INF_superset_mono [OF _ order_refl]

More consistent and comprehensive names:

  INTER_eq_Inter_image ~> INF_def
  UNION_eq_Union_image ~> SUP_def
  INFI_def ~> INF_def
  SUPR_def ~> SUP_def
  INF_leI ~> INF_lower
  INF_leI2 ~> INF_lower2
  le_INFI ~> INF_greatest
  le_SUPI ~> SUP_upper
  le_SUPI2 ~> SUP_upper2
  SUP_leI ~> SUP_least
  INFI_bool_eq ~> INF_bool_eq
  SUPR_bool_eq ~> SUP_bool_eq
  INFI_apply ~> INF_apply
  SUPR_apply ~> SUP_apply
  INTER_def ~> INTER_eq
  UNION_def ~> UNION_eq

INCOMPATIBILITY.

* Renamed theory Complete_Lattice to Complete_Lattices. INCOMPATIBILITY.

* Theory Complete_Lattices: lemmas Inf_eq_top_iff, INF_eq_top_iff, INF_image, Inf_insert, INF_top, Inf_top_conv, INF_top_conv, SUP_bot, Sup_bot_conv, SUP_bot_conv, Sup_eq_top_iff, SUP_eq_top_iff, SUP_image, Sup_insert are now declared as [simp]. INCOMPATIBILITY.

* Theory Lattices: lemmas compl_inf_bot, compl_le_compl_iff, compl_sup_top, inf_idem, inf_left_idem, inf_sup_absorb, sup_idem, sup_inf_absorb, sup_left_idem are now declared as [simp]. Minor INCOMPATIBILITY.

* Added syntactic classes "inf" and "sup" for the respective constants. INCOMPATIBILITY: Changes in the argument order of the (mostly internal) locale predicates for some derived classes.

* Theorem collections ball_simps and bex_simps do not contain theorems referring to UNION any longer; these have been moved to collection UN_ball_bex_simps. INCOMPATIBILITY.
* Theory Archimedean_Field: floor now is defined as parameter of a separate type class floor_ceiling. * Theory Finite_Set: more coherent development of fold_set locales: locale fun_left_comm ~> locale comp_fun_commute locale fun_left_comm_idem ~> locale comp_fun_idem Both use point-free characterization; interpretation proofs may need adjustment. INCOMPATIBILITY. * Theory Limits: Type "'a net" has been renamed to "'a filter", in accordance with standard mathematical terminology. INCOMPATIBILITY. * Theory Complex_Main: The locale interpretations for the bounded_linear and bounded_bilinear locales have been removed, in order to reduce the number of duplicate lemmas. Users must use the original names for distributivity theorems, potential INCOMPATIBILITY. divide.add ~> add_divide_distrib divide.diff ~> diff_divide_distrib divide.setsum ~> setsum_divide_distrib mult.add_right ~> right_distrib mult.diff_right ~> right_diff_distrib mult_right.setsum ~> setsum_right_distrib mult_left.diff ~> left_diff_distrib * Theory Complex_Main: Several redundant theorems have been removed or replaced by more general versions. INCOMPATIBILITY. real_diff_def ~> minus_real_def real_divide_def ~> divide_real_def real_less_def ~> less_le real_abs_def ~> abs_real_def real_sgn_def ~> sgn_real_def real_mult_commute ~> mult_commute real_mult_assoc ~> mult_assoc real_mult_1 ~> mult_1_left real_add_mult_distrib ~> left_distrib real_zero_not_eq_one ~> zero_neq_one real_mult_inverse_left ~> left_inverse INVERSE_ZERO ~> inverse_zero real_le_refl ~> order_refl real_le_antisym ~> order_antisym real_le_trans ~> order_trans real_le_linear ~> linear real_le_eq_diff ~> le_iff_diff_le_0 real_add_left_mono ~> add_left_mono real_mult_order ~> mult_pos_pos real_mult_less_mono2 ~> mult_strict_left_mono real_of_int_real_of_nat ~> real_of_int_of_nat_eq real_0_le_divide_iff ~> zero_le_divide_iff realpow_two_disj ~> power2_eq_iff real_squared_diff_one_factored ~> square_diff_one_factored realpow_two_diff ~> square_diff_square_factored reals_complete2 ~> complete_real real_sum_squared_expand ~> power2_sum exp_ln_eq ~> ln_unique expi_add ~> exp_add expi_zero ~> exp_zero lemma_DERIV_subst ~> DERIV_cong LIMSEQ_Zfun_iff ~> tendsto_Zfun_iff LIMSEQ_const ~> tendsto_const LIMSEQ_norm ~> tendsto_norm LIMSEQ_add ~> tendsto_add LIMSEQ_minus ~> tendsto_minus LIMSEQ_minus_cancel ~> tendsto_minus_cancel LIMSEQ_diff ~> tendsto_diff bounded_linear.LIMSEQ ~> bounded_linear.tendsto bounded_bilinear.LIMSEQ ~> bounded_bilinear.tendsto LIMSEQ_mult ~> tendsto_mult LIMSEQ_inverse ~> tendsto_inverse LIMSEQ_divide ~> tendsto_divide LIMSEQ_pow ~> tendsto_power LIMSEQ_setsum ~> tendsto_setsum LIMSEQ_setprod ~> tendsto_setprod LIMSEQ_norm_zero ~> tendsto_norm_zero_iff LIMSEQ_rabs_zero ~> tendsto_rabs_zero_iff LIMSEQ_imp_rabs ~> tendsto_rabs LIMSEQ_add_minus ~> tendsto_add [OF _ tendsto_minus] LIMSEQ_add_const ~> tendsto_add [OF _ tendsto_const] LIMSEQ_diff_const ~> tendsto_diff [OF _ tendsto_const] LIMSEQ_Complex ~> tendsto_Complex LIM_ident ~> tendsto_ident_at LIM_const ~> tendsto_const LIM_add ~> tendsto_add LIM_add_zero ~> tendsto_add_zero LIM_minus ~> tendsto_minus LIM_diff ~> tendsto_diff LIM_norm ~> tendsto_norm LIM_norm_zero ~> tendsto_norm_zero LIM_norm_zero_cancel ~> tendsto_norm_zero_cancel LIM_norm_zero_iff ~> tendsto_norm_zero_iff LIM_rabs ~> tendsto_rabs LIM_rabs_zero ~> tendsto_rabs_zero LIM_rabs_zero_cancel ~> tendsto_rabs_zero_cancel LIM_rabs_zero_iff ~> tendsto_rabs_zero_iff LIM_compose ~> tendsto_compose LIM_mult ~> tendsto_mult LIM_scaleR ~> 
tendsto_scaleR LIM_of_real ~> tendsto_of_real LIM_power ~> tendsto_power LIM_inverse ~> tendsto_inverse LIM_sgn ~> tendsto_sgn isCont_LIM_compose ~> isCont_tendsto_compose bounded_linear.LIM ~> bounded_linear.tendsto bounded_linear.LIM_zero ~> bounded_linear.tendsto_zero bounded_bilinear.LIM ~> bounded_bilinear.tendsto bounded_bilinear.LIM_prod_zero ~> bounded_bilinear.tendsto_zero bounded_bilinear.LIM_left_zero ~> bounded_bilinear.tendsto_left_zero bounded_bilinear.LIM_right_zero ~> bounded_bilinear.tendsto_right_zero LIM_inverse_fun ~> tendsto_inverse [OF tendsto_ident_at] * Theory Complex_Main: The definition of infinite series was generalized. Now it is defined on the type class {topological_space, comm_monoid_add}. Hence it is useable also for extended real numbers. * Theory Complex_Main: The complex exponential function "expi" is now a type-constrained abbreviation for "exp :: complex => complex"; thus several polymorphic lemmas about "exp" are now applicable to "expi". * Code generation: - Theory Library/Code_Char_ord provides native ordering of characters in the target language. - Commands code_module and code_library are legacy, use export_code instead. - Method "evaluation" is legacy, use method "eval" instead. - Legacy evaluator "SML" is deactivated by default. May be reactivated by the following theory command: setup {* Value.add_evaluator ("SML", Codegen.eval_term) *} * Declare ext [intro] by default. Rare INCOMPATIBILITY. * New proof method "induction" that gives induction hypotheses the name "IH", thus distinguishing them from further hypotheses that come from rule induction. The latter are still called "hyps". Method "induction" is a thin wrapper around "induct" and follows the same syntax. * Method "fastsimp" has been renamed to "fastforce", but "fastsimp" is still available as a legacy feature for some time. * Nitpick: - Added "need" and "total_consts" options. - Reintroduced "show_skolems" option by popular demand. - Renamed attribute: nitpick_def ~> nitpick_unfold. INCOMPATIBILITY. * Sledgehammer: - Use quasi-sound (and efficient) translations by default. - Added support for the following provers: E-ToFoF, LEO-II, Satallax, SNARK, Waldmeister, and Z3 with TPTP syntax. - Automatically preplay and minimize proofs before showing them if this can be done within reasonable time. - sledgehammer available_provers ~> sledgehammer supported_provers. INCOMPATIBILITY. - Added "preplay_timeout", "slicing", "type_enc", "sound", "max_mono_iters", and "max_new_mono_instances" options. - Removed "explicit_apply" and "full_types" options as well as "Full Types" Proof General menu item. INCOMPATIBILITY. * Metis: - Removed "metisF" -- use "metis" instead. INCOMPATIBILITY. - Obsoleted "metisFT" -- use "metis (full_types)" instead. INCOMPATIBILITY. * Command 'try': - Renamed 'try_methods' and added "simp:", "intro:", "dest:", and "elim:" options. INCOMPATIBILITY. - Introduced 'try' that not only runs 'try_methods' but also 'solve_direct', 'sledgehammer', 'quickcheck', and 'nitpick'. * Quickcheck: - Added "eval" option to evaluate terms for the found counterexample (currently only supported by the default (exhaustive) tester). - Added post-processing of terms to obtain readable counterexamples (currently only supported by the default (exhaustive) tester). - New counterexample generator quickcheck[narrowing] enables narrowing-based testing. Requires the Glasgow Haskell compiler with its installation location defined in the Isabelle settings environment as ISABELLE_GHC. 
  - Removed quickcheck tester "SML" based on the SML code generator (formerly in HOL/Library).

* Function package: discontinued option "tailrec". INCOMPATIBILITY, use 'partial_function' instead.

* Theory Library/Extended_Reals now replaces the positive extended reals found in probability theory. This file is extended by Multivariate_Analysis/Extended_Real_Limits.

* Theory Library/Old_Recdef: old 'recdef' package has been moved here, from where it must be imported explicitly if it is really required. INCOMPATIBILITY.

* Theory Library/Wfrec: well-founded recursion combinator "wfrec" has been moved here. INCOMPATIBILITY.

* Theory Library/Saturated provides type of numbers with saturated arithmetic.

* Theory Library/Product_Lattice defines a pointwise ordering for the product type 'a * 'b, and provides instance proofs for various order and lattice type classes.

* Theory Library/Countable now provides the "countable_datatype" proof method for proving "countable" class instances for datatypes.

* Theory Library/Cset_Monad allows do notation for computable sets (cset) via the generic monad ad-hoc overloading facility.

* Library: Theories of common data structures are split into theories for implementation, an invariant-ensuring type, and connection to an abstract type. INCOMPATIBILITY.

  - RBT is split into RBT and RBT_Mapping.
  - AssocList is split and renamed into AList and AList_Mapping.
  - DList is split into DList_Impl, DList, and DList_Cset.
  - Cset is split into Cset and List_Cset.

* Theory Library/Nat_Infinity has been renamed to Library/Extended_Nat, with name changes of the following types and constants:

  type inat ~> type enat
  Fin ~> enat
  Infty ~> infinity (overloaded)
  iSuc ~> eSuc
  the_Fin ~> the_enat

Every theorem name containing "inat", "Fin", "Infty", or "iSuc" has been renamed accordingly. INCOMPATIBILITY.

* Session Multivariate_Analysis: The euclidean_space type class now fixes a constant "Basis :: 'a set" consisting of the standard orthonormal basis for the type. Users now have the option of quantifying over this set instead of using the "basis" function, e.g. "ALL x:Basis. P x" vs "ALL i<DIM('a). P (basis i)".

* Session Multivariate_Analysis: Type "('a, 'b) cart" has been renamed to "('a, 'b) vec" (the syntax "'a ^ 'b" remains unchanged). Theorems renamed accordingly:

  Cart_eq ~> vec_eq_iff
  dist_nth_le_cart ~> dist_vec_nth_le
  tendsto_vector ~> vec_tendstoI
  Cauchy_vector ~> vec_CauchyI

* Session Multivariate_Analysis: Several duplicate theorems have been removed, and other theorems have been renamed or replaced with more general versions. INCOMPATIBILITY.
finite_choice ~> finite_set_choice eventually_conjI ~> eventually_conj eventually_and ~> eventually_conj_iff eventually_false ~> eventually_False setsum_norm ~> norm_setsum Lim_sequentially ~> LIMSEQ_def Lim_ident_at ~> LIM_ident Lim_const ~> tendsto_const Lim_cmul ~> tendsto_scaleR [OF tendsto_const] Lim_neg ~> tendsto_minus Lim_add ~> tendsto_add Lim_sub ~> tendsto_diff Lim_mul ~> tendsto_scaleR Lim_vmul ~> tendsto_scaleR [OF _ tendsto_const] Lim_null_norm ~> tendsto_norm_zero_iff [symmetric] Lim_linear ~> bounded_linear.tendsto Lim_component ~> tendsto_euclidean_component Lim_component_cart ~> tendsto_vec_nth Lim_inner ~> tendsto_inner [OF tendsto_const] dot_lsum ~> inner_setsum_left dot_rsum ~> inner_setsum_right continuous_cmul ~> continuous_scaleR [OF continuous_const] continuous_neg ~> continuous_minus continuous_sub ~> continuous_diff continuous_vmul ~> continuous_scaleR [OF _ continuous_const] continuous_mul ~> continuous_scaleR continuous_inv ~> continuous_inverse continuous_at_within_inv ~> continuous_at_within_inverse continuous_at_inv ~> continuous_at_inverse continuous_at_norm ~> continuous_norm [OF continuous_at_id] continuous_at_infnorm ~> continuous_infnorm [OF continuous_at_id] continuous_at_component ~> continuous_component [OF continuous_at_id] continuous_on_neg ~> continuous_on_minus continuous_on_sub ~> continuous_on_diff continuous_on_cmul ~> continuous_on_scaleR [OF continuous_on_const] continuous_on_vmul ~> continuous_on_scaleR [OF _ continuous_on_const] continuous_on_mul ~> continuous_on_scaleR continuous_on_mul_real ~> continuous_on_mult continuous_on_inner ~> continuous_on_inner [OF continuous_on_const] continuous_on_norm ~> continuous_on_norm [OF continuous_on_id] continuous_on_inverse ~> continuous_on_inv uniformly_continuous_on_neg ~> uniformly_continuous_on_minus uniformly_continuous_on_sub ~> uniformly_continuous_on_diff subset_interior ~> interior_mono subset_closure ~> closure_mono closure_univ ~> closure_UNIV real_arch_lt ~> reals_Archimedean2 real_arch ~> reals_Archimedean3 real_abs_norm ~> abs_norm_cancel real_abs_sub_norm ~> norm_triangle_ineq3 norm_cauchy_schwarz_abs ~> Cauchy_Schwarz_ineq2 * Session HOL-Probability: - Caratheodory's extension lemma is now proved for ring_of_sets. - Infinite products of probability measures are now available. - Sigma closure is independent, if the generator is independent - Use extended reals instead of positive extended reals. INCOMPATIBILITY. * Session HOLCF: Discontinued legacy theorem names, INCOMPATIBILITY. expand_fun_below ~> fun_below_iff below_fun_ext ~> fun_belowI expand_cfun_eq ~> cfun_eq_iff ext_cfun ~> cfun_eqI expand_cfun_below ~> cfun_below_iff below_cfun_ext ~> cfun_belowI monofun_fun_fun ~> fun_belowD monofun_fun_arg ~> monofunE monofun_lub_fun ~> adm_monofun [THEN admD] cont_lub_fun ~> adm_cont [THEN admD] cont2cont_Rep_CFun ~> cont2cont_APP cont_Rep_CFun_app ~> cont_APP_app cont_Rep_CFun_app_app ~> cont_APP_app_app cont_cfun_fun ~> cont_Rep_cfun1 [THEN contE] cont_cfun_arg ~> cont_Rep_cfun2 [THEN contE] contlub_cfun ~> lub_APP [symmetric] contlub_LAM ~> lub_LAM [symmetric] thelubI ~> lub_eqI UU_I ~> bottomI lift_distinct1 ~> lift.distinct(1) lift_distinct2 ~> lift.distinct(2) Def_not_UU ~> lift.distinct(2) Def_inject ~> lift.inject below_UU_iff ~> below_bottom_iff eq_UU_iff ~> eq_bottom_iff *** Document preparation *** * Antiquotation @{rail} layouts railroad syntax diagrams, see also isar-ref manual, both for description and actual application of the same. 
* Antiquotation @{value} evaluates the given term and presents its result.

* Antiquotations: term style "isub" provides ad-hoc conversion of variables x1, y23 into subscripted form x\<^isub>1, y\<^isub>2\<^isub>3.

* Predefined LaTeX macros for Isabelle symbols \<bind> and \<then> (e.g. see ~~/src/HOL/Library/Monad_Syntax.thy).

* Localized \isabellestyle switch can be used within blocks or groups like this:

  \isabellestyle{it}  %preferred default
  {\isabellestylett @{text "typewriter stuff"}}

* Discontinued special treatment of hard tabulators. Implicit tab-width is now defined as 1. Potential INCOMPATIBILITY for visual layouts.

*** ML ***

* The inner syntax of sort/type/term/prop supports inlined YXML representations within quoted string tokens. By encoding logical entities via Term_XML (in ML or Scala) concrete syntax can be bypassed, which is particularly useful for producing bits of text under external program control.

* Antiquotations for ML and document preparation are managed as theory data, which requires explicit setup.

* Isabelle_Process.is_active allows tools to check if the official process wrapper is running (Isabelle/Scala/jEdit) or the old TTY loop (better known as Proof General).

* Structure Proof_Context follows standard naming scheme. Old ProofContext is still available for some time as legacy alias.

* Structure Timing provides various operations for timing; supersedes former start_timing/end_timing etc.

* Path.print is the official way to show file-system paths to users (including quotes etc.).

* Inner syntax: identifiers in parse trees of generic categories "logic", "aprop", "idt" etc. carry position information (disguised as type constraints). Occasional INCOMPATIBILITY with non-compliant translations that choke on unexpected type constraints. Positions can be stripped in ML translations via Syntax.strip_positions / Syntax.strip_positions_ast, or via the syntax constant "_strip_positions" within parse trees. As last resort, positions can be disabled via the configuration option Syntax.positions, which is called "syntax_positions" in Isar attribute syntax.

* Discontinued special status of various ML structures that contribute to structure Syntax (Ast, Lexicon, Mixfix, Parser, Printer etc.): less pervasive content, no inclusion in structure Syntax. INCOMPATIBILITY, refer directly to Ast.Constant, Lexicon.is_identifier, Syntax_Trans.mk_binder_tr etc.

* Typed print translation: discontinued show_sorts argument, which is already available via context of "advanced" translation.

* Refined PARALLEL_GOALS tactical: degrades gracefully for schematic goal states; body tactic needs to address all subgoals uniformly.

* Slightly more special eq_list/eq_set, with shortcut involving pointer equality (assumes that eq relation is reflexive).

* Classical tactics use proper Proof.context instead of historic types claset/clasimpset. Old-style declarations like addIs, addEs, addDs operate directly on Proof.context. Raw type claset retains its use as snapshot of the classical context, which can be recovered via (put_claset HOL_cs) etc. Type clasimpset has been discontinued. INCOMPATIBILITY, classical tactics and derived proof methods require proper Proof.context.

*** System ***

* Discontinued support for Poly/ML 5.2, which was the last version without proper multithreading and TimeLimit implementation.

* Discontinued old lib/scripts/polyml-platform, which has been obsolete since Isabelle2009-2.
* Various optional external tools are referenced more robustly and uniformly by explicit Isabelle settings as follows:

  ISABELLE_CSDP (formerly CSDP_EXE)
  ISABELLE_GHC (formerly EXEC_GHC or GHC_PATH)
  ISABELLE_OCAML (formerly EXEC_OCAML)
  ISABELLE_SWIPL (formerly EXEC_SWIPL)
  ISABELLE_YAP (formerly EXEC_YAP)

Note that automated detection from the file-system or search path has been discontinued. INCOMPATIBILITY.

* Scala layer provides JVM method invocation service for static methods of type (String)String, see Invoke_Scala.method in ML. For example:

  Invoke_Scala.method "java.lang.System.getProperty" "java.home"

Together with YXML.string_of_body/parse_body and XML.Encode/Decode this allows to pass structured values between ML and Scala.

* The IsabelleText fonts include some further glyphs to support the Prover IDE. Potential INCOMPATIBILITY: users who happen to have installed a local copy (which is normally *not* required) need to delete or update it from ~~/lib/fonts/.


New in Isabelle2011 (January 2011)
----------------------------------

*** General ***

* Experimental Prover IDE based on Isabelle/Scala and jEdit (see src/Tools/jEdit). This also serves as IDE for Isabelle/ML, with useful tooltips and hyperlinks produced from its static analysis. The bundled component provides an executable Isabelle tool that can be run like this:

  Isabelle2011/bin/isabelle jedit

* Significantly improved Isabelle/Isar implementation manual.

* System settings: ISABELLE_HOME_USER now includes ISABELLE_IDENTIFIER (and thus refers to something like $HOME/.isabelle/Isabelle2011), while the default heap location within that directory lacks that extra suffix. This isolates multiple Isabelle installations from each other, avoiding problems with old settings in new versions. INCOMPATIBILITY, need to copy/upgrade old user settings manually.

* Source files are always encoded as UTF-8, instead of old-fashioned ISO-Latin-1. INCOMPATIBILITY. Isabelle LaTeX documents might require the following package declarations:

  \usepackage[utf8]{inputenc}
  \usepackage{textcomp}

* Explicit treatment of UTF-8 sequences as Isabelle symbols, such that a Unicode character is treated as a single symbol, not a sequence of non-ASCII bytes as before. Since Isabelle/ML string literals may contain symbols without further backslash escapes, Unicode can now be used here as well. Recall that Symbol.explode in ML provides a consistent view on symbols, while raw explode (or String.explode) merely give a byte-oriented representation.

* Theory loader: source files are primarily located via the master directory of each theory node (where the .thy file itself resides). The global load path is still partially available as legacy feature. Minor INCOMPATIBILITY due to subtle change in file lookup: use explicit paths, relative to the theory.

* Special treatment of ML file names has been discontinued. Historically, optional extensions .ML or .sml were added on demand -- at the cost of clarity of file dependencies. Recall that Isabelle/ML files exclusively use the .ML extension. Minor INCOMPATIBILITY.

* Various options that affect pretty printing etc. are now properly handled within the context via configuration options, instead of unsynchronized references or print modes. There are both ML Config.T entities and Isar declaration attributes to access these.
  ML (Config.T)                 Isar (attribute)

  eta_contract                  eta_contract
  show_brackets                 show_brackets
  show_sorts                    show_sorts
  show_types                    show_types
  show_question_marks           show_question_marks
  show_consts                   show_consts
  show_abbrevs                  show_abbrevs

  Syntax.ast_trace              syntax_ast_trace
  Syntax.ast_stat               syntax_ast_stat
  Syntax.ambiguity_level        syntax_ambiguity_level

  Goal_Display.goals_limit      goals_limit
  Goal_Display.show_main_goal   show_main_goal

  Method.rule_trace             rule_trace

  Thy_Output.display            thy_output_display
  Thy_Output.quotes             thy_output_quotes
  Thy_Output.indent             thy_output_indent
  Thy_Output.source             thy_output_source
  Thy_Output.break              thy_output_break

Note that corresponding "..._default" references in ML may only be changed globally at the ROOT session setup, but *not* within a theory. The option "show_abbrevs" supersedes the former print mode "no_abbrevs" with inverted meaning.

* More systematic naming of some configuration options. INCOMPATIBILITY.

  trace_simp ~> simp_trace
  debug_simp ~> simp_debug

* Support for real valued configuration options, using simplistic floating-point notation that coincides with the inner syntax for float_token.

* Support for real valued preferences (with approximative PGIP type): front-ends need to accept "pgint" values in float notation. INCOMPATIBILITY.

* The IsabelleText font now includes Cyrillic, Hebrew, Arabic from DejaVu Sans.

* Discontinued support for Poly/ML 5.0 and 5.1 versions.

*** Pure ***

* Command 'type_synonym' (with single argument) replaces somewhat outdated 'types', which is still available as legacy feature for some time.

* Command 'nonterminal' (with 'and' separated list of arguments) replaces somewhat outdated 'nonterminals'. INCOMPATIBILITY.

* Command 'notepad' replaces former 'example_proof' for experimentation in Isar without any result. INCOMPATIBILITY.

* Locale interpretation commands 'interpret' and 'sublocale' accept lists of equations to map definitions in a locale to appropriate entities in the context of the interpretation. The 'interpretation' command already provided this functionality.

* Diagnostic command 'print_dependencies' prints the locale instances that would be activated if the specified expression was interpreted in the current context. Variant "print_dependencies!" assumes a context without interpretations.

* Diagnostic command 'print_interps' prints interpretations in proofs in addition to interpretations in theories.

* Discontinued obsolete 'global' and 'local' commands to manipulate the theory name space. Rare INCOMPATIBILITY. The ML functions Sign.root_path and Sign.local_path may be applied directly where this feature is still required for historical reasons.

* Discontinued obsolete 'constdefs' command. INCOMPATIBILITY, use 'definition' instead.

* The "prems" fact, which refers to the accidental collection of foundational premises in the context, is now explicitly marked as legacy feature and will be discontinued soon. Consider using "assms" of the head statement or reference facts by explicit names.

* Document antiquotations @{class} and @{type} print classes and type constructors.

* Document antiquotation @{file} checks file/directory entries within the local file system.

*** HOL ***

* Coercive subtyping: functions can be declared as coercions and type inference will add them as necessary upon input of a term. Theory Complex_Main declares real :: nat => real and real :: int => real as coercions. A coercion function f is declared like this:

  declare [[coercion f]]

To lift coercions through type constructors (e.g. from nat => real to nat list => real list), map functions can be declared, e.g.

  declare [[coercion_map map]]

Currently coercion inference is activated only in theories including real numbers, i.e. descendants of Complex_Main. This is controlled by the configuration option "coercion_enabled", e.g. it can be enabled in other theories like this:

  declare [[coercion_enabled]]
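  For illustration, a minimal sketch (hypothetical theory name) of coercion inference at work; without coercions the mixed-type comparison below would be rejected by type inference:

    theory Coercion_Demo
    imports Complex_Main
    begin

    (* elaborates to "real n < x", with the coercion inserted automatically *)
    term "(n :: nat) < (x :: real)"

    end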
* Command 'partial_function' provides basic support for recursive function definitions over complete partial orders. Concrete instances are provided for i) the option type, ii) tail recursion on arbitrary types, and iii) the heap monad of Imperative_HOL. See src/HOL/ex/Fundefs.thy and src/HOL/Imperative_HOL/ex/Linked_Lists.thy for examples.

* Function package: f.psimps rules are no longer implicitly declared as [simp]. INCOMPATIBILITY.

* Datatype package: theorems generated for executable equality (class "eq") carry proper names and are treated as default code equations.

* Inductive package: now offers command 'inductive_simps' to automatically derive instantiated and simplified equations for inductive predicates, similar to 'inductive_cases'.

* Command 'enriched_type' allows to register properties of the functorial structure of types.

* Improved infrastructure for term evaluation using code generator techniques, in particular static evaluation conversions.

* Code generator: Scala (2.8 or higher) has been added to the target languages.

* Code generator: globbing constant expressions "*" and "Theory.*" have been replaced by the more idiomatic "_" and "Theory._". INCOMPATIBILITY.

* Code generator: export_code without explicit file declaration prints to standard output. INCOMPATIBILITY.

* Code generator: do not print function definitions for case combinators any longer.

* Code generator: simplification with rules determined with src/Tools/Code/code_simp.ML and method "code_simp".

* Code generator for records: more idiomatic representation of record types. Warning: records are not covered by ancient SML code generation any longer. INCOMPATIBILITY. In cases of need, a suitable rep_datatype declaration helps to succeed then:

  record 'a foo = ...
  ...
  rep_datatype foo_ext ...

* Records: logical foundation type for records does not carry a '_type' suffix any longer (obsolete due to authentic syntax). INCOMPATIBILITY.

* Quickcheck now by default uses exhaustive testing instead of random testing. Random testing can be invoked by "quickcheck [random]", exhaustive testing by "quickcheck [exhaustive]".

* Quickcheck instantiates polymorphic types with small finite datatypes by default. This enables a simple execution mechanism to handle quantifiers and function equality over the finite datatypes.

* Quickcheck random generator has been renamed from "code" to "random". INCOMPATIBILITY.

* Quickcheck now has a configurable time limit which is set to 30 seconds by default. This can be changed by adding [timeout = n] to the quickcheck command. The time limit for Auto Quickcheck is still set independently.

* Quickcheck in locales considers interpretations of that locale for counterexample search.

* Sledgehammer:
  - Added "smt" and "remote_smt" provers based on the "smt" proof method. See the Sledgehammer manual for details ("isabelle doc sledgehammer").
  - Renamed commands:
    sledgehammer atp_info ~> sledgehammer running_provers
    sledgehammer atp_kill ~> sledgehammer kill_provers
    sledgehammer available_atps ~> sledgehammer available_provers
    INCOMPATIBILITY.
  - Renamed options:
    sledgehammer [atps = ...] ~> sledgehammer [provers = ...]
sledgehammer [atp = ...] ~> sledgehammer [prover = ...] sledgehammer [timeout = 77 s] ~> sledgehammer [timeout = 77] (and "ms" and "min" are no longer supported) INCOMPATIBILITY. * Nitpick: - Renamed options: nitpick [timeout = 77 s] ~> nitpick [timeout = 77] nitpick [tac_timeout = 777 ms] ~> nitpick [tac_timeout = 0.777] INCOMPATIBILITY. - Added support for partial quotient types. - Added local versions of the "Nitpick.register_xxx" functions. - Added "whack" option. - Allow registration of quotient types as codatatypes. - Improved "merge_type_vars" option to merge more types. - Removed unsound "fast_descrs" option. - Added custom symmetry breaking for datatypes, making it possible to reach higher cardinalities. - Prevent the expansion of too large definitions. * Proof methods "metis" and "meson" now have configuration options "meson_trace", "metis_trace", and "metis_verbose" that can be enabled to diagnose these tools. E.g. using [[metis_trace = true]] * Auto Solve: Renamed "Auto Solve Direct". The tool is now available manually as command 'solve_direct'. * The default SMT solver Z3 must be enabled explicitly (due to licensing issues) by setting the environment variable Z3_NON_COMMERCIAL in etc/settings of the component, for example. For commercial applications, the SMT solver CVC3 is provided as fall-back; changing the SMT solver is done via the configuration option "smt_solver". * Remote SMT solvers need to be referred to by the "remote_" prefix, i.e. "remote_cvc3" and "remote_z3". * Added basic SMT support for datatypes, records, and typedefs using the oracle mode (no proofs). Direct support of pairs has been dropped in exchange (pass theorems fst_conv snd_conv pair_collapse to the SMT support for a similar behavior). Minor INCOMPATIBILITY. * Changed SMT configuration options: - Renamed: z3_proofs ~> smt_oracle (with inverted meaning) z3_trace_assms ~> smt_trace_used_facts INCOMPATIBILITY. - Added: smt_verbose smt_random_seed smt_datatypes smt_infer_triggers smt_monomorph_limit cvc3_options remote_cvc3_options remote_z3_options yices_options * Boogie output files (.b2i files) need to be declared in the theory header. * Simplification procedure "list_to_set_comprehension" rewrites list comprehensions applied to List.set to set comprehensions. Occasional INCOMPATIBILITY, may be deactivated like this: declare [[simproc del: list_to_set_comprehension]] * Removed old version of primrec package. INCOMPATIBILITY. * Removed simplifier congruence rule of "prod_case", as has for long been the case with "split". INCOMPATIBILITY. * String.literal is a type, but not a datatype. INCOMPATIBILITY. * Removed [split_format ... and ... and ...] version of [split_format]. Potential INCOMPATIBILITY. * Predicate "sorted" now defined inductively, with nice induction rules. INCOMPATIBILITY: former sorted.simps now named sorted_simps. * Constant "contents" renamed to "the_elem", to free the generic name contents for other uses. INCOMPATIBILITY. * Renamed class eq and constant eq (for code generation) to class equal and constant equal, plus renaming of related facts and various tuning. INCOMPATIBILITY. * Dropped type classes mult_mono and mult_mono1. INCOMPATIBILITY. * Removed output syntax "'a ~=> 'b" for "'a => 'b option". INCOMPATIBILITY. * Renamed theory Fset to Cset, type Fset.fset to Cset.set, in order to avoid confusion with finite sets. INCOMPATIBILITY. * Abandoned locales equiv, congruent and congruent2 for equivalence relations. 
INCOMPATIBILITY: use equivI rather than equiv_intro (same for congruent(2)). * Some previously unqualified names have been qualified: types bool ~> HOL.bool nat ~> Nat.nat constants Trueprop ~> HOL.Trueprop True ~> HOL.True False ~> HOL.False op & ~> HOL.conj op | ~> HOL.disj op --> ~> HOL.implies op = ~> HOL.eq Not ~> HOL.Not The ~> HOL.The All ~> HOL.All Ex ~> HOL.Ex Ex1 ~> HOL.Ex1 Let ~> HOL.Let If ~> HOL.If Ball ~> Set.Ball Bex ~> Set.Bex Suc ~> Nat.Suc Pair ~> Product_Type.Pair fst ~> Product_Type.fst snd ~> Product_Type.snd curry ~> Product_Type.curry op : ~> Set.member Collect ~> Set.Collect INCOMPATIBILITY. * More canonical naming convention for some fundamental definitions: bot_bool_eq ~> bot_bool_def top_bool_eq ~> top_bool_def inf_bool_eq ~> inf_bool_def sup_bool_eq ~> sup_bool_def bot_fun_eq ~> bot_fun_def top_fun_eq ~> top_fun_def inf_fun_eq ~> inf_fun_def sup_fun_eq ~> sup_fun_def INCOMPATIBILITY. * More stylized fact names: expand_fun_eq ~> fun_eq_iff expand_set_eq ~> set_eq_iff set_ext ~> set_eqI nat_number ~> eval_nat_numeral INCOMPATIBILITY. * Refactoring of code-generation specific operations in theory List: constants null ~> List.null facts mem_iff ~> member_def null_empty ~> null_def INCOMPATIBILITY. Note that these were not supposed to be used regularly unless for striking reasons; their main purpose was code generation. Various operations from the Haskell prelude are used for generating Haskell code. * Term "bij f" is now an abbreviation of "bij_betw f UNIV UNIV". Term "surj f" is now an abbreviation of "range f = UNIV". The theorems bij_def and surj_def are unchanged. INCOMPATIBILITY. * Abolished some non-alphabetic type names: "prod" and "sum" replace "*" and "+" respectively. INCOMPATIBILITY. * Name "Plus" of disjoint sum operator "<+>" is now hidden. Write "Sum_Type.Plus" instead. * Constant "split" has been merged with constant "prod_case"; names of ML functions, facts etc. involving split have been retained so far, though. INCOMPATIBILITY. * Dropped old infix syntax "_ mem _" for List.member; use "_ : set _" instead. INCOMPATIBILITY. * Removed lemma "Option.is_none_none" which duplicates "is_none_def". INCOMPATIBILITY. * Former theory Library/Enum is now part of the HOL-Main image. INCOMPATIBILITY: all constants of the Enum theory now have to be referred to by its qualified name. enum ~> Enum.enum nlists ~> Enum.nlists product ~> Enum.product * Theory Library/Monad_Syntax provides do-syntax for monad types. Syntax in Library/State_Monad has been changed to avoid ambiguities. INCOMPATIBILITY. * Theory Library/SetsAndFunctions has been split into Library/Function_Algebras and Library/Set_Algebras; canonical names for instance definitions for functions; various improvements. INCOMPATIBILITY. * Theory Library/Multiset provides stable quicksort implementation of sort_key. * Theory Library/Multiset: renamed empty_idemp ~> empty_neutral. INCOMPATIBILITY. * Session Multivariate_Analysis: introduced a type class for euclidean space. Most theorems are now stated in terms of euclidean spaces instead of finite cartesian products. types real ^ 'n ~> 'a::real_vector ~> 'a::euclidean_space ~> 'a::ordered_euclidean_space (depends on your needs) constants _ $ _ ~> _ $$ _ \ x. _ ~> \\ x. _ CARD('n) ~> DIM('a) Also note that the indices are now natural numbers and not from some finite type. Finite cartesian products of euclidean spaces, products of euclidean spaces the real and complex numbers are instantiated to be euclidean_spaces. INCOMPATIBILITY. 
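As a hedged sketch of the new statement style (illustrative only; it assumes the generic library fact norm_triangle_ineq is applicable at class euclidean_space, which is a subclass of real normed vector spaces):

  lemma
    fixes x y :: "'a::euclidean_space"
    shows "norm (x + y) <= norm x + norm y"
    by (rule norm_triangle_ineq)

The same statement previously had to be phrased over the concrete type real ^ 'n.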
* Session Probability: introduced pextreal as positive extended real numbers. Use pextreal as value for measures. Introduce the Radon-Nikodym derivative, product spaces and Fubini's theorem for arbitrary sigma finite measures. Introduces Lebesgue measure based on the integral in Multivariate Analysis. INCOMPATIBILITY.

* Session Imperative_HOL: revamped, corrected dozens of inadequacies. INCOMPATIBILITY.

* Session SPARK (with image HOL-SPARK) provides commands to load and prove verification conditions generated by the SPARK Ada program verifier. See also src/HOL/SPARK and src/HOL/SPARK/Examples.


*** HOL-Algebra ***

* Theorems for additive ring operations (locale abelian_monoid and descendants) are generated by interpretation from their multiplicative counterparts. Names (in particular theorem names) have the mandatory qualifier 'add'. Previous theorem names are redeclared for compatibility.

* Structure "int_ring" is now an abbreviation (previously a definition). This fits more naturally with advanced interpretations.


*** HOLCF ***

* The domain package now runs in definitional mode by default: The former command 'new_domain' is now called 'domain'. To use the domain package in its original axiomatic mode, use 'domain (unsafe)'. INCOMPATIBILITY.

* The new class "domain" is now the default sort. Class "predomain" is an unpointed version of "domain". Theories can be updated by replacing sort annotations as shown below. INCOMPATIBILITY.

  'a::type ~> 'a::countable
  'a::cpo ~> 'a::predomain
  'a::pcpo ~> 'a::domain

* The old type class "rep" has been superseded by class "domain". Accordingly, users of the definitional package must remove any "default_sort rep" declarations. INCOMPATIBILITY.

* The domain package (definitional mode) now supports unpointed predomain argument types, as long as they are marked 'lazy'. (Strict arguments must be in class "domain".) For example, the following domain definition now works:

  domain natlist = nil | cons (lazy "nat discr") (lazy "natlist")

* Theory HOLCF/Library/HOL_Cpo provides cpo and predomain class instances for types from main HOL: bool, nat, int, char, 'a + 'b, 'a option, and 'a list. Additionally, it configures fixrec and the domain package to work with these types. For example:

  fixrec isInl :: "('a + 'b) u -> tr"
    where "isInl$(up$(Inl x)) = TT" | "isInl$(up$(Inr y)) = FF"

  domain V = VFun (lazy "V -> V") | VCon (lazy "nat") (lazy "V list")

* The "(permissive)" option of fixrec has been replaced with a per-equation "(unchecked)" option. See src/HOL/HOLCF/Tutorial/Fixrec_ex.thy for examples. INCOMPATIBILITY.

* The "bifinite" class no longer fixes a constant "approx"; the class now just asserts that such a function exists. INCOMPATIBILITY.

* Former type "alg_defl" has been renamed to "defl". HOLCF no longer defines an embedding of type 'a defl into udom by default; instances of "bifinite" and "domain" classes are available in src/HOL/HOLCF/Library/Defl_Bifinite.thy.

* The syntax "REP('a)" has been replaced with "DEFL('a)".

* The predicate "directed" has been removed. INCOMPATIBILITY.

* The type class "finite_po" has been removed. INCOMPATIBILITY.

* The function "cprod_map" has been renamed to "prod_map". INCOMPATIBILITY.

* The monadic bind operator on each powerdomain has new binder syntax similar to sets, e.g. "\<Union>x\<in>xs. t" represents "upper_bind\<cdot>xs\<cdot>(\<Lambda> x. t)".

* The infix syntax for binary union on each powerdomain has changed from e.g. "+\<sharp>" to "\<union>\<sharp>", for consistency with set syntax. INCOMPATIBILITY.

* The constant "UU" has been renamed to "bottom". The syntax "UU" is still supported as an input translation.

* Renamed some theorems (the original names are also still available).

  expand_fun_below ~> fun_below_iff
  below_fun_ext ~> fun_belowI
  expand_cfun_eq ~> cfun_eq_iff
  ext_cfun ~> cfun_eqI
  expand_cfun_below ~> cfun_below_iff
  below_cfun_ext ~> cfun_belowI
  cont2cont_Rep_CFun ~> cont2cont_APP

* The Abs and Rep functions for various types have changed names. Related theorem names have also changed to match. INCOMPATIBILITY.

  Rep_CFun ~> Rep_cfun
  Abs_CFun ~> Abs_cfun
  Rep_Sprod ~> Rep_sprod
  Abs_Sprod ~> Abs_sprod
  Rep_Ssum ~> Rep_ssum
  Abs_Ssum ~> Abs_ssum

* Lemmas with names of the form *_defined_iff or *_strict_iff have been renamed to *_bottom_iff. INCOMPATIBILITY.

* Various changes to bisimulation/coinduction with domain package:
  - Definitions of "bisim" constants no longer mention definedness.
  - With mutual recursion, "bisim" predicate is now curried.
  - With mutual recursion, each type gets a separate coind theorem.
  - Variable names in bisim_def and coinduct rules have changed.
  INCOMPATIBILITY.

* Case combinators generated by the domain package for type "foo" are now named "foo_case" instead of "foo_when". INCOMPATIBILITY.

* Several theorems have been renamed to more accurately reflect the names of constants and types involved. INCOMPATIBILITY.

  thelub_const ~> lub_const
  lub_const ~> is_lub_const
  thelubI ~> lub_eqI
  is_lub_lub ~> is_lubD2
  lubI ~> is_lub_lub
  unique_lub ~> is_lub_unique
  is_ub_lub ~> is_lub_rangeD1
  lub_bin_chain ~> is_lub_bin_chain
  lub_fun ~> is_lub_fun
  thelub_fun ~> lub_fun
  thelub_cfun ~> lub_cfun
  thelub_Pair ~> lub_Pair
  lub_cprod ~> is_lub_prod
  thelub_cprod ~> lub_prod
  minimal_cprod ~> minimal_prod
  inst_cprod_pcpo ~> inst_prod_pcpo
  UU_I ~> bottomI
  compact_UU ~> compact_bottom
  deflation_UU ~> deflation_bottom
  finite_deflation_UU ~> finite_deflation_bottom

* Many legacy theorem names have been discontinued. INCOMPATIBILITY.

  sq_ord_less_eq_trans ~> below_eq_trans
  sq_ord_eq_less_trans ~> eq_below_trans
  refl_less ~> below_refl
  trans_less ~> below_trans
  antisym_less ~> below_antisym
  antisym_less_inverse ~> po_eq_conv [THEN iffD1]
  box_less ~> box_below
  rev_trans_less ~> rev_below_trans
  not_less2not_eq ~> not_below2not_eq
  less_UU_iff ~> below_UU_iff
  flat_less_iff ~> flat_below_iff
  adm_less ~> adm_below
  adm_not_less ~> adm_not_below
  adm_compact_not_less ~> adm_compact_not_below
  less_fun_def ~> below_fun_def
  expand_fun_less ~> fun_below_iff
  less_fun_ext ~> fun_belowI
  less_discr_def ~> below_discr_def
  discr_less_eq ~> discr_below_eq
  less_unit_def ~> below_unit_def
  less_cprod_def ~> below_prod_def
  prod_lessI ~> prod_belowI
  Pair_less_iff ~> Pair_below_iff
  fst_less_iff ~> fst_below_iff
  snd_less_iff ~> snd_below_iff
  expand_cfun_less ~> cfun_below_iff
  less_cfun_ext ~> cfun_belowI
  injection_less ~> injection_below
  less_up_def ~> below_up_def
  not_Iup_less ~> not_Iup_below
  Iup_less ~> Iup_below
  up_less ~> up_below
  Def_inject_less_eq ~> Def_below_Def
  Def_less_is_eq ~> Def_below_iff
  spair_less_iff ~> spair_below_iff
  less_sprod ~> below_sprod
  spair_less ~> spair_below
  sfst_less_iff ~> sfst_below_iff
  ssnd_less_iff ~> ssnd_below_iff
  fix_least_less ~> fix_least_below
  dist_less_one ~> dist_below_one
  less_ONE ~> below_ONE
  ONE_less_iff ~> ONE_below_iff
  less_sinlD ~> below_sinlD
  less_sinrD ~> below_sinrD


*** FOL and ZF ***

* All constant names are now qualified internally and use proper identifiers, e.g. "IFOL.eq" instead of "op =". INCOMPATIBILITY.
*** ML ***

* Antiquotation @{assert} inlines a function bool -> unit that raises Fail if the argument is false. Due to inlining the source position of failed assertions is included in the error output.

* Discontinued antiquotation @{theory_ref}, which is obsolete since ML text is in practice always evaluated with a stable theory checkpoint. Minor INCOMPATIBILITY, use (Theory.check_thy @{theory}) instead.

* Antiquotation @{theory A} refers to theory A from the ancestry of the current context, not any accidental theory loader state as before. Potential INCOMPATIBILITY, subtle change in semantics.

* Syntax.pretty_priority (default 0) configures the required priority of pretty-printed output and thus affects insertion of parentheses.

* Syntax.default_root (default "any") configures the inner syntax category (nonterminal symbol) for parsing of terms.

* Former exception Library.UnequalLengths now coincides with ListPair.UnequalLengths.

* Renamed structure MetaSimplifier to Raw_Simplifier. Note that the main functionality is provided by structure Simplifier.

* Renamed raw "explode" function to "raw_explode" to emphasize its meaning. Note that internally to Isabelle, Symbol.explode is used in almost all situations.

* Discontinued obsolete function sys_error and exception SYS_ERROR. See implementation manual for further details on exceptions in Isabelle/ML.

* Renamed setmp_noncritical to Unsynchronized.setmp to emphasize its meaning.

* Renamed structure PureThy to Pure_Thy and moved most of its operations to structure Global_Theory, to emphasize that this is rarely-used global-only stuff.

* Discontinued Output.debug. Minor INCOMPATIBILITY, use plain writeln instead (or tracing for high-volume output).

* Configuration option show_question_marks only affects regular pretty printing of types and terms, not raw Term.string_of_vname.

* ML_Context.thm and ML_Context.thms are no longer pervasive. Rare INCOMPATIBILITY, superseded by static antiquotations @{thm} and @{thms} for most purposes.

* ML structure Unsynchronized is never opened, not even in Isar interaction mode as before. Old Unsynchronized.set etc. have been discontinued -- use plain := instead. This should be *rare* anyway, since modern tools always work via official context data, notably configuration options.

* Parallel and asynchronous execution requires special care concerning interrupts. Structure Exn provides some convenience functions that avoid working directly with raw Interrupt. User code must not absorb interrupts -- intermediate handling (for cleanup etc.) needs to be followed by re-raising of the original exception. Another common source of mistakes is "handle _" patterns, which make the meaning of the program subject to physical effects of the environment.



New in Isabelle2009-2 (June 2010)
---------------------------------

*** General ***

* Authentic syntax for *all* logical entities (type classes, type constructors, term constants): provides simple and robust correspondence between formal entities and concrete syntax. Within the parse tree / AST representations, "constants" are decorated by their category (class, type, const) and spelled out explicitly with their full internal name.

Substantial INCOMPATIBILITY concerning low-level syntax declarations and translations (translation rules and translation functions in ML). Some hints on upgrading:

  - Many existing uses of 'syntax' and 'translations' can be replaced by more modern 'type_notation', 'notation' and 'abbreviation', which are independent of this issue.

  - 'translations' require markup within the AST; the term syntax provides the following special forms:

      CONST c   -- produces syntax version of constant c from context
      XCONST c  -- literally c, checked as constant from context
      c         -- literally c, if declared by 'syntax'

    Plain identifiers are treated as AST variables -- occasionally the system indicates accidental variables via the error "rhs contains extra variables".

    Type classes and type constructors are marked according to their concrete syntax. Some old translations rules need to be written for the "type" category, using type constructor application instead of pseudo-term application of the default category "logic".

  - 'parse_translation' etc. in ML may use the following antiquotations:

      @{class_syntax c}  -- type class c within parse tree / AST
      @{term_syntax c}   -- type constructor c within parse tree / AST
      @{const_syntax c}  -- ML version of "CONST c" above
      @{syntax_const c}  -- literally c (checked wrt. 'syntax' declarations)

  - Literal types within 'typed_print_translations', i.e. those *not* represented as pseudo-terms are represented verbatim. Use @{class c} or @{type_name c} here instead of the above syntax antiquotations.

Note that old non-authentic syntax was based on unqualified base names, so all of the above "constant" names would coincide. Recall that 'print_syntax' and ML_command "set Syntax.trace_ast" help to diagnose syntax problems.

* Type constructors admit general mixfix syntax, not just infix.

* Concrete syntax may be attached to local entities without a proof body, too. This works via regular mixfix annotations for 'fix', 'def', 'obtain' etc. or via the explicit 'write' command, which is similar to the 'notation' command in theory specifications.

* Discontinued unnamed infix syntax (legacy feature for many years) -- need to specify constant name and syntax separately. Internal ML datatype constructors have been renamed from InfixName to Infix etc. Minor INCOMPATIBILITY.

* Schematic theorem statements need to be explicitly marked up as such, via commands 'schematic_lemma', 'schematic_theorem', 'schematic_corollary' (see the sketch below). Thus the relevance of the proof is made syntactically clear, which impacts performance in a parallel or asynchronous interactive environment. Minor INCOMPATIBILITY.

* Use of cumulative prems via "!" in some proof methods has been discontinued (old legacy feature).

* References 'trace_simp' and 'debug_simp' have been replaced by configuration options stored in the context. Enabling tracing (the case of debugging is similar) in proofs works via

  using [[trace_simp = true]]

Tracing is then active for all invocations of the simplifier in subsequent goal refinement steps. Tracing may also still be enabled or disabled via the ProofGeneral settings menu.

* Separate commands 'hide_class', 'hide_type', 'hide_const', 'hide_fact' replace the former 'hide' KIND command. Minor INCOMPATIBILITY.

* Improved parallelism of proof term normalization: usedir -p2 -q0 is more efficient than combinations with -q1 or -q2.


*** Pure ***

* Proofterms record type-class reasoning explicitly, using the "unconstrain" operation internally. This eliminates all sort constraints from a theorem and proof, introducing explicit OFCLASS-premises. On the proof term level, this operation is automatically applied at theorem boundaries, such that closed proofs are always free of sort constraints. INCOMPATIBILITY for tools that inspect proof terms.
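As a minimal hedged sketch for the 'schematic_lemma' command mentioned above (the schematic variable ?x is instantiated in the course of the proof):

  schematic_lemma "?x : {1::nat, 2, 3}"
    by (rule insertI1)

Here the rule application instantiates ?x to 1, so the resulting theorem is "1 : {1, 2, 3}".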
* Local theory specifications may depend on extra type variables that are not present in the result type -- arguments TYPE('a) :: 'a itself are added internally. For example:

  definition unitary :: bool where "unitary = (ALL (x::'a) y. x = y)"

* Predicates of locales introduced by classes carry a mandatory "class" prefix. INCOMPATIBILITY.

* Vacuous class specifications observe default sort. INCOMPATIBILITY.

* Old 'axclass' command has been discontinued. INCOMPATIBILITY, use 'class' instead.

* Command 'code_reflect' allows to incorporate generated ML code into runtime environment; replaces immature code_datatype antiquotation. INCOMPATIBILITY.

* Code generator: simple concept for abstract datatypes obeying invariants.

* Code generator: details of internal data cache have no impact on the user space functionality any longer.

* Methods "unfold_locales" and "intro_locales" ignore non-locale subgoals. This is more appropriate for interpretations with 'where'. INCOMPATIBILITY.

* Command 'example_proof' opens an empty proof body. This allows to experiment with Isar, without producing any persistent result.

* Commands 'type_notation' and 'no_type_notation' declare type syntax within a local theory context, with explicit checking of the constructors involved (in contrast to the raw 'syntax' versions).

* Commands 'types' and 'typedecl' now work within a local theory context -- without introducing dependencies on parameters or assumptions, which is not possible in Isabelle/Pure.

* Command 'defaultsort' has been renamed to 'default_sort', it works within a local theory context. Minor INCOMPATIBILITY.


*** HOL ***

* Command 'typedef' now works within a local theory context -- without introducing dependencies on parameters or assumptions, which is not possible in Isabelle/Pure/HOL. Note that the logical environment may contain multiple interpretations of local typedefs (with different non-emptiness proofs), even in a global theory context.

* New package for quotient types. Commands 'quotient_type' and 'quotient_definition' may be used for defining types and constants by quotient constructions. An example is the type of integers created by quotienting pairs of natural numbers:

  fun intrel :: "(nat * nat) => (nat * nat) => bool"
  where "intrel (x, y) (u, v) = (x + v = u + y)"

  quotient_type int = "nat * nat" / intrel
    by (auto simp add: equivp_def expand_fun_eq)

  quotient_definition "0::int" is "(0::nat, 0::nat)"

The method "lifting" can be used to lift theorems from the underlying "raw" type to the quotient type. The example src/HOL/Quotient_Examples/FSet.thy includes such a quotient construction and provides a reasoning infrastructure for finite sets.

* Renamed Library/Quotient.thy to Library/Quotient_Type.thy to avoid clash with new theory Quotient in Main HOL.

* Moved the SMT binding into the main HOL session, eliminating separate HOL-SMT session.

* List membership infix mem operation is only an input abbreviation. INCOMPATIBILITY.

* Theory Library/Word.thy has been removed. Use library Word/Word.thy for future developments; former Library/Word.thy is still present in the AFP entry RSAPPS.

* Theorem Int.int_induct renamed to Int.int_of_nat_induct and is no longer shadowed. INCOMPATIBILITY.

* Dropped theorem duplicate comp_arith; use semiring_norm instead. INCOMPATIBILITY.

* Dropped theorem RealPow.real_sq_order; use power2_le_imp_le instead. INCOMPATIBILITY.

* Dropped normalizing_semiring etc; use the facts in semiring classes instead. INCOMPATIBILITY.

* Dropped several real-specific versions of lemmas about floor and ceiling; use the generic lemmas from theory "Archimedean_Field" instead. INCOMPATIBILITY.

  floor_number_of_eq ~> floor_number_of
  le_floor_eq_number_of ~> number_of_le_floor
  le_floor_eq_zero ~> zero_le_floor
  le_floor_eq_one ~> one_le_floor
  floor_less_eq_number_of ~> floor_less_number_of
  floor_less_eq_zero ~> floor_less_zero
  floor_less_eq_one ~> floor_less_one
  less_floor_eq_number_of ~> number_of_less_floor
  less_floor_eq_zero ~> zero_less_floor
  less_floor_eq_one ~> one_less_floor
  floor_le_eq_number_of ~> floor_le_number_of
  floor_le_eq_zero ~> floor_le_zero
  floor_le_eq_one ~> floor_le_one
  floor_subtract_number_of ~> floor_diff_number_of
  floor_subtract_one ~> floor_diff_one
  ceiling_number_of_eq ~> ceiling_number_of
  ceiling_le_eq_number_of ~> ceiling_le_number_of
  ceiling_le_zero_eq ~> ceiling_le_zero
  ceiling_le_eq_one ~> ceiling_le_one
  less_ceiling_eq_number_of ~> number_of_less_ceiling
  less_ceiling_eq_zero ~> zero_less_ceiling
  less_ceiling_eq_one ~> one_less_ceiling
  ceiling_less_eq_number_of ~> ceiling_less_number_of
  ceiling_less_eq_zero ~> ceiling_less_zero
  ceiling_less_eq_one ~> ceiling_less_one
  le_ceiling_eq_number_of ~> number_of_le_ceiling
  le_ceiling_eq_zero ~> zero_le_ceiling
  le_ceiling_eq_one ~> one_le_ceiling
  ceiling_subtract_number_of ~> ceiling_diff_number_of
  ceiling_subtract_one ~> ceiling_diff_one

* Theory "Finite_Set": various folding_XXX locales facilitate the application of the various fold combinators on finite sets.

* Library theory "RBT" renamed to "RBT_Impl"; new library theory "RBT" provides abstract red-black tree type which is backed by "RBT_Impl" as implementation. INCOMPATIBILITY.

* Theory Library/Coinductive_List has been removed -- superseded by AFP/thys/Coinductive.

* Theory PReal, including the type "preal" and related operations, has been removed. INCOMPATIBILITY.

* Real: new development using Cauchy Sequences.

* Split off theory "Big_Operators" containing setsum, setprod, Inf_fin, Sup_fin, Min, Max from theory Finite_Set. INCOMPATIBILITY.

* Theory "Rational" renamed to "Rat", for consistency with "Nat", "Int" etc. INCOMPATIBILITY.

* Constant Rat.normalize needs to be qualified. INCOMPATIBILITY.

* New set of rules "ac_simps" provides combined assoc / commute rewrites for all interpretations of the appropriate generic locales (see the sketch below).

* Renamed theory "OrderedGroup" to "Groups" and split theory "Ring_and_Field" into theories "Rings" and "Fields"; for more appropriate and more consistent names suitable for name prefixes within the HOL theories. INCOMPATIBILITY.

* Some generic constants have been put to appropriate theories:
  - less_eq, less: Orderings
  - zero, one, plus, minus, uminus, times, abs, sgn: Groups
  - inverse, divide: Rings
  INCOMPATIBILITY.
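A minimal hedged sketch of "ac_simps" in action, using addition on nat (illustrative only):

  lemma "a + (b + c) = b + (a + (c::nat))"
    by (simp add: ac_simps)

Ordered rewriting with the combined associativity/commutativity rules normalizes both sides to the same form.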
* More consistent naming of type classes involving orderings (and lattices):

  lower_semilattice ~> semilattice_inf
  upper_semilattice ~> semilattice_sup
  dense_linear_order ~> dense_linorder
  pordered_ab_group_add ~> ordered_ab_group_add
  pordered_ab_group_add_abs ~> ordered_ab_group_add_abs
  pordered_ab_semigroup_add ~> ordered_ab_semigroup_add
  pordered_ab_semigroup_add_imp_le ~> ordered_ab_semigroup_add_imp_le
  pordered_cancel_ab_semigroup_add ~> ordered_cancel_ab_semigroup_add
  pordered_cancel_comm_semiring ~> ordered_cancel_comm_semiring
  pordered_cancel_semiring ~> ordered_cancel_semiring
  pordered_comm_monoid_add ~> ordered_comm_monoid_add
  pordered_comm_ring ~> ordered_comm_ring
  pordered_comm_semiring ~> ordered_comm_semiring
  pordered_ring ~> ordered_ring
  pordered_ring_abs ~> ordered_ring_abs
  pordered_semiring ~> ordered_semiring
  ordered_ab_group_add ~> linordered_ab_group_add
  ordered_ab_semigroup_add ~> linordered_ab_semigroup_add
  ordered_cancel_ab_semigroup_add ~> linordered_cancel_ab_semigroup_add
  ordered_comm_semiring_strict ~> linordered_comm_semiring_strict
  ordered_field ~> linordered_field
  ordered_field_no_lb ~> linordered_field_no_lb
  ordered_field_no_ub ~> linordered_field_no_ub
  ordered_field_dense_linear_order ~> dense_linordered_field
  ordered_idom ~> linordered_idom
  ordered_ring ~> linordered_ring
  ordered_ring_le_cancel_factor ~> linordered_ring_le_cancel_factor
  ordered_ring_less_cancel_factor ~> linordered_ring_less_cancel_factor
  ordered_ring_strict ~> linordered_ring_strict
  ordered_semidom ~> linordered_semidom
  ordered_semiring ~> linordered_semiring
  ordered_semiring_1 ~> linordered_semiring_1
  ordered_semiring_1_strict ~> linordered_semiring_1_strict
  ordered_semiring_strict ~> linordered_semiring_strict

The following slightly odd type classes have been moved to a separate theory Library/Lattice_Algebras:

  lordered_ab_group_add ~> lattice_ab_group_add
  lordered_ab_group_add_abs ~> lattice_ab_group_add_abs
  lordered_ab_group_add_meet ~> semilattice_inf_ab_group_add
  lordered_ab_group_add_join ~> semilattice_sup_ab_group_add
  lordered_ring ~> lattice_ring

INCOMPATIBILITY.

* Refined field classes:
  - classes division_ring_inverse_zero, field_inverse_zero, linordered_field_inverse_zero include rule inverse 0 = 0 -- subsumes former division_by_zero class;
  - numerous lemmas have been ported from field to division_ring.
  INCOMPATIBILITY.

* Refined algebra theorem collections:
  - dropped theorem group group_simps, use algebra_simps instead;
  - dropped theorem group ring_simps, use field_simps instead;
  - proper theorem collection field_simps subsumes former theorem groups field_eq_simps and field_simps;
  - dropped lemma eq_minus_self_iff which is a duplicate for equal_neg_zero.
  INCOMPATIBILITY.

* Theory Finite_Set and List: some lemmas have been generalized from sets to lattices:

  fun_left_comm_idem_inter ~> fun_left_comm_idem_inf
  fun_left_comm_idem_union ~> fun_left_comm_idem_sup
  inter_Inter_fold_inter ~> inf_Inf_fold_inf
  union_Union_fold_union ~> sup_Sup_fold_sup
  Inter_fold_inter ~> Inf_fold_inf
  Union_fold_union ~> Sup_fold_sup
  inter_INTER_fold_inter ~> inf_INFI_fold_inf
  union_UNION_fold_union ~> sup_SUPR_fold_sup
  INTER_fold_inter ~> INFI_fold_inf
  UNION_fold_union ~> SUPR_fold_sup

* Theory "Complete_Lattice": lemmas top_def and bot_def have been replaced by the more convenient lemmas Inf_empty and Sup_empty. Dropped lemmas Inf_insert_simp and Sup_insert_simp, which are subsumed by Inf_insert and Sup_insert. Lemmas Inf_UNIV and Sup_UNIV replace former Inf_Univ and Sup_Univ. Lemmas inf_top_right and sup_bot_right subsume inf_top and sup_bot respectively. INCOMPATIBILITY.

* Reorganized theory Multiset: swapped notation of pointwise and multiset order:
  - pointwise ordering is instance of class order with standard syntax <= and <;
  - multiset ordering has syntax <=# and <#; partial order properties are provided by means of interpretation with prefix multiset_order;
  - less duplication, less historical organization of sections, conversion from associations lists to multisets, rudimentary code generation;
  - use insert_DiffM2 [symmetric] instead of elem_imp_eq_diff_union, if needed.

  Renamed:
    multiset_eq_conv_count_eq ~> multiset_ext_iff
    multi_count_ext ~> multiset_ext
    diff_union_inverse2 ~> diff_union_cancelR

  INCOMPATIBILITY.

* Theory Permutation: replaced local "remove" by List.remove1.

* Code generation: ML and OCaml code is decorated with signatures.

* Theory List: added transpose.

* Library/Nat_Bijection.thy is a collection of bijective functions between nat and other types, which supersedes the older libraries Library/Nat_Int_Bij.thy and HOLCF/NatIso.thy. INCOMPATIBILITY.

  Constants:
    Nat_Int_Bij.nat2_to_nat ~> prod_encode
    Nat_Int_Bij.nat_to_nat2 ~> prod_decode
    Nat_Int_Bij.int_to_nat_bij ~> int_encode
    Nat_Int_Bij.nat_to_int_bij ~> int_decode
    Countable.pair_encode ~> prod_encode
    NatIso.prod2nat ~> prod_encode
    NatIso.nat2prod ~> prod_decode
    NatIso.sum2nat ~> sum_encode
    NatIso.nat2sum ~> sum_decode
    NatIso.list2nat ~> list_encode
    NatIso.nat2list ~> list_decode
    NatIso.set2nat ~> set_encode
    NatIso.nat2set ~> set_decode

  Lemmas:
    Nat_Int_Bij.bij_nat_to_int_bij ~> bij_int_decode
    Nat_Int_Bij.nat2_to_nat_inj ~> inj_prod_encode
    Nat_Int_Bij.nat2_to_nat_surj ~> surj_prod_encode
    Nat_Int_Bij.nat_to_nat2_inj ~> inj_prod_decode
    Nat_Int_Bij.nat_to_nat2_surj ~> surj_prod_decode
    Nat_Int_Bij.i2n_n2i_id ~> int_encode_inverse
    Nat_Int_Bij.n2i_i2n_id ~> int_decode_inverse
    Nat_Int_Bij.surj_nat_to_int_bij ~> surj_int_encode
    Nat_Int_Bij.surj_int_to_nat_bij ~> surj_int_decode
    Nat_Int_Bij.inj_nat_to_int_bij ~> inj_int_encode
    Nat_Int_Bij.inj_int_to_nat_bij ~> inj_int_decode
    Nat_Int_Bij.bij_nat_to_int_bij ~> bij_int_encode
    Nat_Int_Bij.bij_int_to_nat_bij ~> bij_int_decode

* Sledgehammer:
  - Renamed ATP commands:
      atp_info ~> sledgehammer running_atps
      atp_kill ~> sledgehammer kill_atps
      atp_messages ~> sledgehammer messages
      atp_minimize ~> sledgehammer minimize
      print_atps ~> sledgehammer available_atps
    INCOMPATIBILITY.
  - Added user's manual ("isabelle doc sledgehammer").
  - Added option syntax and "sledgehammer_params" to customize Sledgehammer's behavior. See the manual for details.
  - Modified the Isar proof reconstruction code so that it produces direct proofs rather than proofs by contradiction. (This feature is still experimental.)
  - Made Isar proof reconstruction work for SPASS, remote ATPs, and in full-typed mode.
  - Added support for TPTP syntax for SPASS via the "spass_tptp" ATP.

* Nitpick:
  - Added and implemented "binary_ints" and "bits" options.
  - Added "std" option and implemented support for nonstandard models.
  - Added and implemented "finitize" option to improve the precision of infinite datatypes based on a monotonicity analysis.
  - Added support for quotient types.
  - Added support for "specification" and "ax_specification" constructs.
  - Added support for local definitions (for "function" and "termination" proofs).
  - Added support for term postprocessors.
  - Optimized "Multiset.multiset" and "FinFun.finfun".
  - Improved efficiency of "destroy_constrs" optimization.
  - Fixed soundness bugs related to "destroy_constrs" optimization and record getters.
  - Fixed soundness bug related to higher-order constructors.
  - Fixed soundness bug when "full_descrs" is enabled.
  - Improved precision of set constructs.
  - Added "atoms" option.
  - Added cache to speed up repeated Kodkod invocations on the same problems.
  - Renamed "MiniSatJNI", "zChaffJNI", "BerkMinAlloy", and "SAT4JLight" to "MiniSat_JNI", "zChaff_JNI", "BerkMin_Alloy", and "SAT4J_Light". INCOMPATIBILITY.
  - Removed "skolemize", "uncurry", "sym_break", "flatten_prop", "sharing_depth", and "show_skolems" options. INCOMPATIBILITY.
  - Removed "nitpick_intro" attribute. INCOMPATIBILITY.

* Method "induct" now takes instantiations of the form t, where t is not a variable, as a shorthand for "x == t", where x is a fresh variable. If this is not intended, t has to be enclosed in parentheses. By default, the equalities generated by definitional instantiations are pre-simplified, which may cause parameters of inductive cases to disappear, or may even delete some of the inductive cases. Use "induct (no_simp)" instead of "induct" to restore the old behaviour. The (no_simp) option is also understood by the "cases" and "nominal_induct" methods, which now perform pre-simplification, too. INCOMPATIBILITY.


*** HOLCF ***

* Variable names in lemmas generated by the domain package have changed; the naming scheme is now consistent with the HOL datatype package. Some proof scripts may be affected, INCOMPATIBILITY.

* The domain package no longer defines the function "foo_copy" for recursive domain "foo". The reach lemma is now stated directly in terms of "foo_take". Lemmas and proofs that mention "foo_copy" must be reformulated in terms of "foo_take", INCOMPATIBILITY.

* Most definedness lemmas generated by the domain package (previously of the form "x ~= UU ==> foo$x ~= UU") now have an if-and-only-if form like "foo$x = UU <-> x = UU", which works better as a simp rule. Proofs that used definedness lemmas as intro rules may break, potential INCOMPATIBILITY.

* Induction and casedist rules generated by the domain package now declare proper case_names (one called "bottom", and one named for each constructor). INCOMPATIBILITY.

* For mutually-recursive domains, separate "reach" and "take_lemma" rules are generated for each domain, INCOMPATIBILITY.

  foo_bar.reach ~> foo.reach bar.reach
  foo_bar.take_lemmas ~> foo.take_lemma bar.take_lemma

* Some lemmas generated by the domain package have been renamed for consistency with the datatype package, INCOMPATIBILITY.

  foo.ind ~> foo.induct
  foo.finite_ind ~> foo.finite_induct
  foo.coind ~> foo.coinduct
  foo.casedist ~> foo.exhaust
  foo.exhaust ~> foo.nchotomy

* For consistency with other definition packages, the fixrec package now generates qualified theorem names, INCOMPATIBILITY.

  foo_simps ~> foo.simps
  foo_unfold ~> foo.unfold
  foo_induct ~> foo.induct

* The "fixrec_simp" attribute has been removed. The "fixrec_simp" method and internal fixrec proofs now use the default simpset instead. INCOMPATIBILITY.

* The "contlub" predicate has been removed. Proof scripts should use lemma contI2 in place of monocontlub2cont, INCOMPATIBILITY.

* The "admw" predicate has been removed, INCOMPATIBILITY.

* The constants cpair, cfst, and csnd have been removed in favor of Pair, fst, and snd from Isabelle/HOL, INCOMPATIBILITY.
*** ML ***

* Antiquotations for basic formal entities:

  @{class NAME}         -- type class
  @{class_syntax NAME}  -- syntax representation of the above
  @{type_name NAME}     -- logical type
  @{type_abbrev NAME}   -- type abbreviation
  @{nonterminal NAME}   -- type of concrete syntactic category
  @{type_syntax NAME}   -- syntax representation of any of the above
  @{const_name NAME}    -- logical constant (INCOMPATIBILITY)
  @{const_abbrev NAME}  -- abbreviated constant
  @{const_syntax NAME}  -- syntax representation of any of the above

* Antiquotation @{syntax_const NAME} ensures that NAME refers to a raw syntax constant (cf. 'syntax' command).

* Antiquotation @{make_string} inlines a function to print arbitrary values similar to the ML toplevel. The result is compiler dependent and may fall back on "?" in certain situations.

* Diagnostic commands 'ML_val' and 'ML_command' may refer to antiquotations @{Isar.state} and @{Isar.goal}. This replaces impure Isar.state() and Isar.goal(), which belong to the old TTY loop and do not work with the asynchronous Isar document model.

* Configuration options now admit dynamic default values, depending on the context or even global references.

* SHA1.digest digests strings according to SHA-1 (see RFC 3174). It uses an efficient external library if available (for Poly/ML).

* Renamed some important ML structures, while keeping the old names for some time as aliases within the structure Legacy:

  OuterKeyword ~> Keyword
  OuterLex ~> Token
  OuterParse ~> Parse
  OuterSyntax ~> Outer_Syntax
  PrintMode ~> Print_Mode
  SpecParse ~> Parse_Spec
  ThyInfo ~> Thy_Info
  ThyLoad ~> Thy_Load
  ThyOutput ~> Thy_Output
  TypeInfer ~> Type_Infer

Note that "open Legacy" simplifies porting of sources, but forgetting to remove it again will complicate porting again in the future.

* Most operations that refer to a global context are named accordingly, e.g. Simplifier.global_context or ProofContext.init_global. There are some situations where a global context actually works, but under normal circumstances one needs to pass the proper local context through the code!

* Discontinued old TheoryDataFun with its copy/init operation -- data needs to be pure. Functor Theory_Data_PP retains the traditional Pretty.pp argument to merge, which is absent in the standard Theory_Data version.

* Sorts.certify_sort and derived "cert" operations for types and terms no longer minimize sorts. Thus certification at the boundary of the inference kernel becomes invariant under addition of class relations, which is an important monotonicity principle. Sorts are now minimized in the syntax layer only, at the boundary between the end-user and the system. Subtle INCOMPATIBILITY, may have to use Sign.minimize_sort explicitly in rare situations.

* Renamed old-style Drule.standard to Drule.export_without_context, to emphasize that this is in no way a standard operation. INCOMPATIBILITY.

* Subgoal.FOCUS (and variants): resulting goal state is normalized as usual for resolution. Rare INCOMPATIBILITY.

* Renamed varify/unvarify operations to varify_global/unvarify_global to emphasize that these only work in a global situation (which is quite rare).

* Curried take and drop in library.ML; negative length is interpreted as infinity (as in chop). Subtle INCOMPATIBILITY.

* Proof terms: type substitutions on proof constants now use canonical order of type variables. INCOMPATIBILITY for tools working with proof terms.

* Raw axioms/defs may no longer carry sort constraints, and raw defs may no longer carry premises. User-level specifications are transformed accordingly by Thm.add_axiom/add_def.


*** System ***

* Discontinued special HOL_USEDIR_OPTIONS for the main HOL image; ISABELLE_USEDIR_OPTIONS applies uniformly to all sessions. Note that proof terms are enabled unconditionally in the new HOL-Proofs image.

* Discontinued old ISABELLE and ISATOOL environment settings (legacy feature since Isabelle2009). Use ISABELLE_PROCESS and ISABELLE_TOOL, respectively.

* Old lib/scripts/polyml-platform is superseded by the ISABELLE_PLATFORM setting variable, which defaults to the 32 bit variant, even on a 64 bit machine. The following example setting prefers 64 bit if available:

  ML_PLATFORM="${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM}"

* The preliminary Isabelle/jEdit application demonstrates the emerging Isabelle/Scala layer for advanced prover interaction and integration. See src/Tools/jEdit or "isabelle jedit" provided by the properly built component.

* "IsabelleText" is a Unicode font derived from Bitstream Vera Mono and Bluesky TeX fonts. It provides the usual Isabelle symbols, similar to the default assignment of the document preparation system (cf. isabellesym.sty). The Isabelle/Scala class Isabelle_System provides some operations for direct access to the font without asking the user for manual installation.



New in Isabelle2009-1 (December 2009)
-------------------------------------

*** General ***

* Discontinued old form of "escaped symbols" such as \\<forall>. Only one backslash should be used, even in ML sources.


*** Pure ***

* Locale interpretation propagates mixins along the locale hierarchy. The currently only available mixins are the equations used to map local definitions to terms of the target domain of an interpretation.

* Reactivated diagnostic command 'print_interps'. Use "print_interps loc" to print all interpretations of locale "loc" in the theory. Interpretations in proofs are not shown.

* Thoroughly revised locales tutorial. New section on conditional interpretation.

* On instantiation of classes, remaining undefined class parameters are formally declared. INCOMPATIBILITY.


*** Document preparation ***

* New generalized style concept for printing terms: @{foo (style) ...} instead of @{foo_style style ...} (old form is still retained for backward compatibility). Styles can be also applied for antiquotations prop, term_type and typeof.


*** HOL ***

* New proof method "smt" for a combination of first-order logic with equality, linear and nonlinear (natural/integer/real) arithmetic, and fixed-size bitvectors; there is also basic support for higher-order features (esp. lambda abstractions). It is an incomplete decision procedure based on external SMT solvers using the oracle mechanism; for the SMT solver Z3, this method is proof-producing. Certificates are provided to avoid calling the external solvers solely for re-checking proofs. Due to a remote SMT service there is no need for installing SMT solvers locally. See src/HOL/SMT.

* New commands to load and prove verification conditions generated by the Boogie program verifier or derived systems (e.g. the Verifying C Compiler (VCC) or Spec#). See src/HOL/Boogie.

* New counterexample generator tool 'nitpick' based on the Kodkod relational model finder. See src/HOL/Tools/Nitpick and src/HOL/Nitpick_Examples.

* New commands 'code_pred' and 'values' to invoke the predicate compiler and to enumerate values of inductive predicates (see the sketch below).

* A tabled implementation of the reflexive transitive closure.
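A hedged sketch for the 'code_pred' and 'values' commands mentioned above (the inductive predicate "app" is a hypothetical example, essentially list append as a relation):

  inductive app :: "'a list => 'a list => 'a list => bool" where
    "app [] ys ys"
  | "app xs ys zs ==> app (x # xs) ys (x # zs)"

  code_pred app .

  values "{(xs, ys). app xs ys [(1::nat), 2]}"

The 'values' query enumerates all decompositions of the two-element list, relying on the modes inferred by the predicate compiler.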
* New implementation of quickcheck uses generic code generator; default generators are provided for all suitable HOL types, records and datatypes. Old quickcheck can be re-activated importing theory Library/SML_Quickcheck.

* New testing tool Mirabelle for automated proof tools. Applies several tools and tactics like sledgehammer, metis, or quickcheck, to every proof step in a theory. To be used in batch mode via the "mirabelle" utility.

* New proof method "sos" (sum of squares) for nonlinear real arithmetic (originally due to John Harrison). It requires theory Library/Sum_Of_Squares. It is not a complete decision procedure but works well in practice on quantifier-free real arithmetic with +, -, *, ^, =, <= and <, i.e. boolean combinations of equalities and inequalities between polynomials. It makes use of external semidefinite programming solvers. Method "sos" generates a certificate that can be pasted into the proof thus avoiding the need to call an external tool every time the proof is checked. See src/HOL/Library/Sum_Of_Squares.

* New method "linarith" invokes existing linear arithmetic decision procedure only.

* New command 'atp_minimal' reduces result produced by Sledgehammer.

* New Sledgehammer option "Full Types" in Proof General settings menu. Causes full type information to be output to the ATPs. This slows ATPs down considerably but eliminates a source of unsound "proofs" that fail later.

* New method "metisFT": A version of metis that uses full type information in order to avoid failures of proof reconstruction.

* New evaluator "approximate" approximates a real valued term using the same method as the approximation method.

* Method "approximate" now supports arithmetic expressions as boundaries of intervals and implements interval splitting and Taylor series expansion.

* ML antiquotation @{code_datatype} inserts definition of a datatype generated by the code generator; e.g. see src/HOL/Predicate.thy.

* New theory SupInf of the supremum and infimum operators for sets of reals.

* New theory Probability, which contains a development of measure theory, eventually leading to Lebesgue integration and probability.

* Extended Multivariate Analysis to include derivation and Brouwer's fixpoint theorem.

* Reorganization of number theory, INCOMPATIBILITY:
  - new number theory development for nat and int, in theories Divides and GCD as well as in new session Number_Theory
  - some constants and facts now suffixed with _nat and _int accordingly
  - former session NumberTheory now named Old_Number_Theory, including theories Legacy_GCD and Primes (prefer Number_Theory if possible)
  - moved theory Pocklington from src/HOL/Library to src/HOL/Old_Number_Theory

* Theory GCD includes functions Gcd/GCD and Lcm/LCM for the gcd and lcm of finite and infinite sets. It is shown that they form a complete lattice.

* Class semiring_div requires superclass no_zero_divisors and proof of div_mult_mult1; theorems div_mult_mult1, div_mult_mult2, div_mult_mult1_if, div_mult_mult1 and div_mult_mult2 have been generalized to class semiring_div, subsuming former theorems zdiv_zmult_zmult1, zdiv_zmult_zmult1_if, zdiv_zmult_zmult1 and zdiv_zmult_zmult2. div_mult_mult1 is now [simp] by default. INCOMPATIBILITY.
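A hedged sketch of the generalized cancellation rule at type int (illustrative only; since div_mult_mult1 is a default simp rule, plain simp should apply as well):

  lemma
    fixes a b c :: int
    assumes "c ~= 0"
    shows "(c * a) div (c * b) = a div b"
    using assms by (rule div_mult_mult1)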
* Refinements to lattice classes and sets:
  - less default intro/elim rules in locale variant, more default intro/elim rules in class variant: more uniformity
  - lemma ge_sup_conv renamed to le_sup_iff, in accordance with le_inf_iff
  - dropped lemma alias inf_ACI for inf_aci (same for sup_ACI and sup_aci)
  - renamed ACI to inf_sup_aci
  - new class "boolean_algebra"
  - class "complete_lattice" moved to separate theory "Complete_Lattice"; corresponding constants (and abbreviations) renamed and with authentic syntax:
      Set.Inf ~> Complete_Lattice.Inf
      Set.Sup ~> Complete_Lattice.Sup
      Set.INFI ~> Complete_Lattice.INFI
      Set.SUPR ~> Complete_Lattice.SUPR
      Set.Inter ~> Complete_Lattice.Inter
      Set.Union ~> Complete_Lattice.Union
      Set.INTER ~> Complete_Lattice.INTER
      Set.UNION ~> Complete_Lattice.UNION
  - authentic syntax for
      Set.Pow
      Set.image
  - mere abbreviations:
      Set.empty (for bot)
      Set.UNIV (for top)
      Set.inter (for inf, formerly Set.Int)
      Set.union (for sup, formerly Set.Un)
      Complete_Lattice.Inter (for Inf)
      Complete_Lattice.Union (for Sup)
      Complete_Lattice.INTER (for INFI)
      Complete_Lattice.UNION (for SUPR)
  - object-logic definitions as far as appropriate

INCOMPATIBILITY. Care is required when theorems Int_subset_iff or Un_subset_iff are explicitly deleted as default simp rules; then also their lattice counterparts le_inf_iff and le_sup_iff have to be deleted to achieve the desired effect.

* Rules inf_absorb1, inf_absorb2, sup_absorb1, sup_absorb2 are no simp rules by default any longer; the same applies to min_max.inf_absorb1 etc. INCOMPATIBILITY.

* Rules sup_Int_eq and sup_Un_eq are no longer declared as pred_set_conv by default. INCOMPATIBILITY.

* Power operations on relations and functions are now one dedicated constant "compow" with infix syntax "^^" (see the sketch below). Power operation on multiplicative monoids retains syntax "^" and is now defined generic in class power. INCOMPATIBILITY.

* Relation composition "R O S" now has a more standard argument order: "R O S = {(x, z). EX y. (x, y) : R & (y, z) : S}". INCOMPATIBILITY, rewrite propositions with "S O R" --> "R O S". Proofs may occasionally break, since the O_assoc rule was not rewritten like this. Fix using O_assoc[symmetric]. The same applies to the curried version "R OO S".

* Function "Inv" is renamed to "inv_into" and function "inv" is now an abbreviation for "inv_into UNIV". Lemmas are renamed accordingly. INCOMPATIBILITY.

* Most rules produced by inductive and datatype package have mandatory prefixes. INCOMPATIBILITY.

* Changed "DERIV_intros" to a dynamic fact, which can be augmented by the attribute of the same name. Each of the theorems in the list DERIV_intros assumes composition with an additional function and matches a variable to the derivative, which has to be solved by the Simplifier. Hence (auto intro!: DERIV_intros) computes the derivative of most elementary terms. Former Maclauren.DERIV_tac and Maclauren.deriv_tac should be replaced by (auto intro!: DERIV_intros). INCOMPATIBILITY.

* Code generator attributes follow the usual underscore convention:
    code_unfold replaces code unfold
    code_post replaces code post
    etc.
  INCOMPATIBILITY.

* Renamed methods:
    sizechange -> size_change
    induct_scheme -> induction_schema
  INCOMPATIBILITY.

* Discontinued abbreviation "arbitrary" of constant "undefined". INCOMPATIBILITY, use "undefined" directly.

* Renamed theorems:
    Suc_eq_add_numeral_1 -> Suc_eq_plus1
    Suc_eq_add_numeral_1_left -> Suc_eq_plus1_left
    Suc_plus1 -> Suc_eq_plus1
    *anti_sym -> *antisym*
    vector_less_eq_def -> vector_le_def
  INCOMPATIBILITY.
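A hedged sketch of the new relation power syntax "^^" mentioned above; the unfolding of the numeral via numeral_2_eq_2 and the concrete simp setup are assumptions and may need minor adjustment:

  lemma "R ^^ 2 = R O R"
    by (simp add: numeral_2_eq_2)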
* Added theorem List.map_map as [simp]. Removed List.map_compose. INCOMPATIBILITY.

* Removed predicate "M hassize n" (<--> card M = n & finite M). INCOMPATIBILITY.


*** HOLCF ***

* Theory Representable defines a class "rep" of domains that are representable (via an ep-pair) in the universal domain type "udom". Instances are provided for all type constructors defined in HOLCF.

* The 'new_domain' command is a purely definitional version of the domain package, for representable domains. Syntax is identical to the old domain package. The 'new_domain' package also supports indirect recursion using previously-defined type constructors. See src/HOLCF/ex/New_Domain.thy for examples.

* Method "fixrec_simp" unfolds one step of a fixrec-defined constant on the left-hand side of an equation, and then performs simplification. Rewriting is done using rules declared with the "fixrec_simp" attribute. The "fixrec_simp" method is intended as a replacement for "fixpat"; see src/HOLCF/ex/Fixrec_ex.thy for examples.

* The pattern-match compiler in 'fixrec' can now handle constructors with HOL function types. Pattern-match combinators for the Pair constructor are pre-configured.

* The 'fixrec' package now produces better fixed-point induction rules for mutually-recursive definitions: Induction rules have conclusions of the form "P foo bar" instead of "P <foo, bar>".

* The constant "sq_le" (with infix syntax "<<" or "\<sqsubseteq>") has been renamed to "below". The name "below" now replaces "less" in many theorem names. (Legacy theorem names using "less" are still supported as well.)

* The 'fixrec' package now supports "bottom patterns". Bottom patterns can be used to generate strictness rules, or to make functions more strict (much like the bang-patterns supported by the Glasgow Haskell Compiler). See src/HOLCF/ex/Fixrec_ex.thy for examples.


*** ML ***

* Support for Poly/ML 5.3.0, with improved reporting of compiler errors and run-time exceptions, including detailed source positions.

* Structure Name_Space (formerly NameSpace) now manages uniquely identified entries, with some additional information such as source position, logical grouping etc.

* Theory and context data is now introduced by the simplified and modernized functors Theory_Data, Proof_Data, Generic_Data. Data needs to be pure, but the old TheoryDataFun for mutable data (with explicit copy operation) is still available for some time.

* Structure Synchronized (cf. src/Pure/Concurrent/synchronized.ML) provides a high-level programming interface to synchronized state variables with atomic update. This works via pure function application within a critical section -- its runtime should be as short as possible; beware of deadlocks if critical code is nested, either directly or indirectly via other synchronized variables!

* Structure Unsynchronized (cf. src/Pure/ML-Systems/unsynchronized.ML) wraps raw ML references, explicitly indicating their non-thread-safe behaviour. The Isar toplevel keeps this structure open, to accommodate Proof General as well as quick and dirty interactive experiments with references.

* PARALLEL_CHOICE and PARALLEL_GOALS provide basic support for parallel tactical reasoning.

* Tacticals Subgoal.FOCUS, Subgoal.FOCUS_PREMS, Subgoal.FOCUS_PARAMS are similar to SUBPROOF, but are slightly more flexible: only the specified parts of the subgoal are imported into the context, and the body tactic may introduce new subgoals and schematic variables.
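A hedged Isabelle/ML sketch of Subgoal.FOCUS (illustrative only; the focused premises become theorems in the "prems" field of the focus record and here solve the conclusion by resolution):

  lemma "A ==> B ==> A"
    apply (tactic {* Subgoal.FOCUS (fn {prems, ...} =>
      resolve_tac prems 1) @{context} 1 *})
    done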
* Old tactical METAHYPS, which does not observe the proof context, has been renamed to Old_Goals.METAHYPS and awaits deletion. Use SUBPROOF or Subgoal.FOCUS etc. * Renamed functor TableFun to Table, and GraphFun to Graph. (Since functors have their own ML name space there is no point to mark them separately.) Minor INCOMPATIBILITY. * Renamed NamedThmsFun to Named_Thms. INCOMPATIBILITY. * Renamed several structures FooBar to Foo_Bar. Occasional, INCOMPATIBILITY. * Operations of structure Skip_Proof no longer require quick_and_dirty mode, which avoids critical setmp. * Eliminated old Attrib.add_attributes, Method.add_methods and related combinators for "args". INCOMPATIBILITY, need to use simplified Attrib/Method.setup introduced in Isabelle2009. * Proper context for simpset_of, claset_of, clasimpset_of. May fall back on global_simpset_of, global_claset_of, global_clasimpset_of as last resort. INCOMPATIBILITY. * Display.pretty_thm now requires a proper context (cf. former ProofContext.pretty_thm). May fall back on Display.pretty_thm_global or even Display.pretty_thm_without_context as last resort. INCOMPATIBILITY. * Discontinued Display.pretty_ctyp/cterm etc. INCOMPATIBILITY, use Syntax.pretty_typ/term directly, preferably with proper context instead of global theory. *** System *** * Further fine tuning of parallel proof checking, scales up to 8 cores (max. speedup factor 5.0). See also Goal.parallel_proofs in ML and usedir option -q. * Support for additional "Isabelle components" via etc/components, see also the system manual. * The isabelle makeall tool now operates on all components with IsaMakefile, not just hardwired "logics". * Removed "compress" option from isabelle-process and isabelle usedir; this is always enabled. * Discontinued support for Poly/ML 4.x versions. * Isabelle tool "wwwfind" provides web interface for 'find_theorems' on a given logic image. This requires the lighttpd webserver and is currently supported on Linux only. New in Isabelle2009 (April 2009) -------------------------------- *** General *** * Simplified main Isabelle executables, with less surprises on case-insensitive file-systems (such as Mac OS). - The main Isabelle tool wrapper is now called "isabelle" instead of "isatool." - The former "isabelle" alias for "isabelle-process" has been removed (should rarely occur to regular users). - The former "isabelle-interface" and its alias "Isabelle" have been removed (interfaces are now regular Isabelle tools). Within scripts and make files, the Isabelle environment variables ISABELLE_TOOL and ISABELLE_PROCESS replace old ISATOOL and ISABELLE, respectively. (The latter are still available as legacy feature.) The old isabelle-interface wrapper could react in confusing ways if the interface was uninstalled or changed otherwise. Individual interface tool configuration is now more explicit, see also the Isabelle system manual. In particular, Proof General is now available via "isabelle emacs". INCOMPATIBILITY, need to adapt derivative scripts. Users may need to purge installed copies of Isabelle executables and re-run "isabelle install -p ...", or use symlinks. * The default for ISABELLE_HOME_USER is now ~/.isabelle instead of the old ~/isabelle, which was slightly non-standard and apt to cause surprises on case-insensitive file-systems (such as Mac OS). INCOMPATIBILITY, need to move existing ~/isabelle/etc, ~/isabelle/heaps, ~/isabelle/browser_info to the new place. Special care is required when using older releases of Isabelle. 

Note that ISABELLE_HOME_USER can be changed in Isabelle/etc/settings of any Isabelle distribution, in order to use the new ~/.isabelle uniformly.

* Proofs of fully specified statements are run in parallel on multi-core systems.  A speedup factor of 2.5 to 3.2 can be expected on a regular 4-core machine, if the initial heap space is made reasonably large (cf. Poly/ML option -H).  (Requires Poly/ML 5.2.1 or later.)

* The main reference manuals ("isar-ref", "implementation", and "system") have been updated and extended.  Formally checked references as hyperlinks are now available uniformly.


*** Pure ***

* Complete re-implementation of locales.  INCOMPATIBILITY in several respects.  The most important changes are listed below.  See the Tutorial on Locales ("locales" manual) for details.

  - In locale expressions, instantiation replaces renaming.  Parameters must be declared in a for clause.  To aid compatibility with previous parameter inheritance, in locale declarations, parameters that are not 'touched' (instantiation position "_" or omitted) are implicitly added with their syntax at the beginning of the for clause.

  - Syntax from abbreviations and definitions in locales is available in locale expressions and context elements.  The latter is particularly useful in locale declarations.

  - More flexible mechanisms to qualify names generated by locale expressions.  Qualifiers (prefixes) may be specified in locale expressions, and can be marked as mandatory (syntax: "name!:") or optional (syntax "name?:").  The default for plain "name:" depends on the situation where a locale expression is used: in commands 'locale' and 'sublocale' prefixes are optional, in 'interpretation' and 'interpret' prefixes are mandatory.  The old implicit qualifiers derived from the parameter names of a locale are no longer generated.

  - Command "sublocale l < e" replaces "interpretation l < e".  The instantiation clause in "interpretation" and "interpret" (square brackets) is no longer available.  Use locale expressions.

  - When converting proof scripts, mandatory qualifiers in 'interpretation' and 'interpret' should be retained by default, even if this is an INCOMPATIBILITY compared to former behavior.  In the worst case, use the "name?:" form for non-mandatory ones.  Qualifiers in locale expressions range over a single locale instance only.

  - Dropped locale element "includes".  This is a major INCOMPATIBILITY.  In existing theorem specifications replace the includes element by the respective context elements of the included locale, omitting those that are already present in the theorem specification.  Multiple assume elements of a locale should be replaced by a single one involving the locale predicate.  In the proof body, declarations (most notably theorems) may be regained by interpreting the respective locales in the proof context as required (command "interpret").

    If using "includes" in replacement of a target solely because the parameter types in the theorem are not as general as in the target, consider declaring a new locale with additional type constraints on the parameters (context element "constrains").

  - Discontinued "locale (open)".  INCOMPATIBILITY.

  - Locale interpretation commands no longer attempt to simplify the goal.  INCOMPATIBILITY: in rare situations the generated goal differs.  Use methods intro_locales and unfold_locales to clarify.

  - Locale interpretation commands no longer accept interpretation attributes.  INCOMPATIBILITY.
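
For illustration, a minimal interpretation with a mandatory qualifier (the locale and all names are hypothetical):

    locale semigroup =
      fixes f :: "'a => 'a => 'a"
      assumes assoc: "f (f x y) z = f x (f y z)"

    interpretation plus!: semigroup "op + :: nat => nat => nat"
      by unfold_locales (simp add: add_assoc)

Afterwards, thm plus.assoc refers to the interpreted fact under the mandatory prefix.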

* Class declaration: the so-called "base sort" must not be given in the import list any longer, but is inferred from the specification.  Particularly in HOL, write

    class foo = ...

instead of

    class foo = type + ...

* Class target: global versions of theorems do not carry a parameter prefix any longer.  INCOMPATIBILITY.

* Class 'instance' command no longer accepts attached definitions.  INCOMPATIBILITY, use proper 'instantiation' target instead.

* Recovered hiding of consts, which was accidentally broken in Isabelle2007.  Potential INCOMPATIBILITY, ``hide const c'' really makes c inaccessible; consider using ``hide (open) const c'' instead.

* Slightly more coherent Pure syntax, with updated documentation in the isar-ref manual.  Removed locales meta_term_syntax and meta_conjunction_syntax: TERM and &&& (formerly &&) are now permanent, INCOMPATIBILITY in rare situations.  Note that &&& should not be used directly in regular applications.

* There is a new syntactic category "float_const" for signed decimal fractions (e.g. 123.45 or -123.45).

* Removed exotic 'token_translation' command.  INCOMPATIBILITY, use ML interface with 'setup' command instead.

* Command 'local_setup' is similar to 'setup', but operates on a local theory context.

* The 'axiomatization' command now only works within a global theory context.  INCOMPATIBILITY.

* Goal-directed proof now enforces strict proof irrelevance wrt. sort hypotheses.  Sorts required in the course of reasoning need to be covered by the constraints in the initial statement, completed by the type instance information of the background theory.  Non-trivial sort hypotheses, which rarely occur in practice, may be specified via vacuous propositions of the form SORT_CONSTRAINT('a::c).  For example:

    lemma assumes "SORT_CONSTRAINT('a::empty)" shows False ...

The result contains an implicit sort hypothesis as before -- SORT_CONSTRAINT premises are eliminated as part of the canonical rule normalization.

* Generalized Isar history, with support for linear undo, direct state addressing etc.

* Changed defaults for unify configuration options:

    unify_trace_bound = 50 (formerly 25)
    unify_search_bound = 60 (formerly 30)

* Different bookkeeping for code equations (INCOMPATIBILITY):

  a) On theory merge, the last set of code equations for a particular constant is taken (in accordance with the policy applied by other parts of the code generator framework).

  b) Code equations stemming from explicit declarations (e.g. code attribute) gain priority over default code equations stemming from definition, primrec, fun etc.

* Keyword 'code_exception' now named 'code_abort'.  INCOMPATIBILITY.

* Unified theorem tables for both code generators.  Thus [code func] has disappeared and only [code] remains.  INCOMPATIBILITY.

* Command 'find_consts' searches for constants based on type and name patterns, e.g.

    find_consts "_ => bool"

By default, matching is against subtypes, but it may be restricted to the whole type.  Searching by name is possible.  Multiple queries are conjunctive and queries may be negated by prefixing them with a hyphen:

    find_consts strict: "_ => bool" name: "Int" -"int => int"

* New 'find_theorems' criterion "solves" matches theorems that directly solve the current goal (modulo higher-order unification).

* Auto solve feature for main theorem statements: whenever a new goal is stated, "find_theorems solves" is called; any theorems that could solve the lemma directly are listed as part of the goal state.  Cf.
associated options in the Proof General Isabelle settings menu, enabled by default, with a reasonable timeout for pathological cases of higher-order unification.


*** Document preparation ***

* Antiquotation @{lemma} now imitates a regular terminal proof, demanding keyword 'by' and supporting the full method expression syntax just like the Isar command 'by'.


*** HOL ***

* Integrated main parts of former image HOL-Complex with HOL.  Entry points Main and Complex_Main remain as before.

* Logic image HOL-Plain provides a minimal HOL with the most important tools available (inductive, datatype, primrec, ...).  This facilitates experimentation and tool development.  Note that user applications (and library theories) should never refer to anything below theory Main, as before.

* Logic image HOL-Main stops at theory Main, and thus facilitates experimentation due to shorter build times.

* Logic image HOL-NSA contains theories of nonstandard analysis which were previously part of former HOL-Complex.  Entry point Hyperreal remains valid, but theories formerly using Complex_Main should now use the new entry point Hypercomplex.

* Generic ATP manager for Sledgehammer, based on ML threads instead of Posix processes.  Avoids potentially expensive forking of the ML process.  The new thread-based implementation also works on non-Unix platforms (Cygwin).  Provers are no longer hardwired, but defined within the theory via plain ML wrapper functions.  Basic Sledgehammer commands are covered in the isar-ref manual.

* Wrapper scripts for the remote SystemOnTPTP service allow to use sledgehammer without a local ATP installation (Vampire etc.).  Other provers may be included via suitable ML wrappers, see also src/HOL/ATP_Linkup.thy.

* ATP selection (E/Vampire/Spass) is now via Proof General's settings menu.

* The metis method no longer fails because the theorem is too trivial (contains the empty clause).

* The metis method now fails in the usual manner, rather than raising an exception, if it determines that it cannot prove the theorem.

* Method "coherent" implements a prover for coherent logic (see also src/Tools/coherent.ML).

* Constants "undefined" and "default" replace "arbitrary".  Usually "undefined" is the right choice to replace "arbitrary", though logically there is no difference.  INCOMPATIBILITY.

* Command "value" now integrates different evaluation mechanisms.  The result of the first successful evaluation mechanism is printed.  In square brackets a particular named evaluation mechanism may be specified (currently, [SML], [code] or [nbe]).  See further src/HOL/ex/Eval_Examples.thy.

* Normalization by evaluation now allows non-leftlinear equations.  Declare with attribute [code nbe].

* Methods "case_tac" and "induct_tac" now refer to the very same rules as the structured Isar versions "cases" and "induct", cf. the corresponding "cases" and "induct" attributes.  Mutual induction rules are now presented as a list of individual projections (e.g. foo_bar.inducts for types foo and bar); the old format with explicit HOL conjunction is no longer supported.  INCOMPATIBILITY, in rare situations a different rule is selected --- notably nested tuple elimination instead of former prod.exhaust: use explicit (case_tac t rule: prod.exhaust) here.

* Attributes "cases", "induct", "coinduct" support "del" option.

* Removed fact "case_split_thm", which duplicates "case_split".

* The option datatype has been moved to a new theory Option.  Renamed option_map to Option.map, and o2s to Option.set, INCOMPATIBILITY.
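
For instance, the renamed operations can be exercised directly with the 'value' command (a small illustrative example):

    value "Option.map Suc (Some 0)"    (*yields "Some (Suc 0)"*)
    value [nbe] "Option.map Suc None"  (*yields "None"*)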
* New predicate "strict_mono" classifies strict functions on partial orders. With strict functions on linear orders, reasoning about (in)equalities is facilitated by theorems "strict_mono_eq", "strict_mono_less_eq" and "strict_mono_less". * Some set operations are now proper qualified constants with authentic syntax. INCOMPATIBILITY: op Int ~> Set.Int op Un ~> Set.Un INTER ~> Set.INTER UNION ~> Set.UNION Inter ~> Set.Inter Union ~> Set.Union {} ~> Set.empty UNIV ~> Set.UNIV * Class complete_lattice with operations Inf, Sup, INFI, SUPR now in theory Set. * Auxiliary class "itself" has disappeared -- classes without any parameter are treated as expected by the 'class' command. * Leibnitz's Series for Pi and the arcus tangens and logarithm series. * Common decision procedures (Cooper, MIR, Ferrack, Approximation, Dense_Linear_Order) are now in directory HOL/Decision_Procs. * Theory src/HOL/Decision_Procs/Approximation provides the new proof method "approximation". It proves formulas on real values by using interval arithmetic. In the formulas are also the transcendental functions sin, cos, tan, atan, ln, exp and the constant pi are allowed. For examples see src/HOL/Descision_Procs/ex/Approximation_Ex.thy. * Theory "Reflection" now resides in HOL/Library. * Entry point to Word library now simply named "Word". INCOMPATIBILITY. * Made source layout more coherent with logical distribution structure: src/HOL/Library/RType.thy ~> src/HOL/Typerep.thy src/HOL/Library/Code_Message.thy ~> src/HOL/ src/HOL/Library/GCD.thy ~> src/HOL/ src/HOL/Library/Order_Relation.thy ~> src/HOL/ src/HOL/Library/Parity.thy ~> src/HOL/ src/HOL/Library/Univ_Poly.thy ~> src/HOL/ src/HOL/Real/ContNotDenum.thy ~> src/HOL/Library/ src/HOL/Real/Lubs.thy ~> src/HOL/ src/HOL/Real/PReal.thy ~> src/HOL/ src/HOL/Real/Rational.thy ~> src/HOL/ src/HOL/Real/RComplete.thy ~> src/HOL/ src/HOL/Real/RealDef.thy ~> src/HOL/ src/HOL/Real/RealPow.thy ~> src/HOL/ src/HOL/Real/Real.thy ~> src/HOL/ src/HOL/Complex/Complex_Main.thy ~> src/HOL/ src/HOL/Complex/Complex.thy ~> src/HOL/ src/HOL/Complex/FrechetDeriv.thy ~> src/HOL/Library/ src/HOL/Complex/Fundamental_Theorem_Algebra.thy ~> src/HOL/Library/ src/HOL/Hyperreal/Deriv.thy ~> src/HOL/ src/HOL/Hyperreal/Fact.thy ~> src/HOL/ src/HOL/Hyperreal/Integration.thy ~> src/HOL/ src/HOL/Hyperreal/Lim.thy ~> src/HOL/ src/HOL/Hyperreal/Ln.thy ~> src/HOL/ src/HOL/Hyperreal/Log.thy ~> src/HOL/ src/HOL/Hyperreal/MacLaurin.thy ~> src/HOL/ src/HOL/Hyperreal/NthRoot.thy ~> src/HOL/ src/HOL/Hyperreal/Series.thy ~> src/HOL/ src/HOL/Hyperreal/SEQ.thy ~> src/HOL/ src/HOL/Hyperreal/Taylor.thy ~> src/HOL/ src/HOL/Hyperreal/Transcendental.thy ~> src/HOL/ src/HOL/Real/Float ~> src/HOL/Library/ src/HOL/Real/HahnBanach ~> src/HOL/HahnBanach src/HOL/Real/RealVector.thy ~> src/HOL/ src/HOL/arith_data.ML ~> src/HOL/Tools src/HOL/hologic.ML ~> src/HOL/Tools src/HOL/simpdata.ML ~> src/HOL/Tools src/HOL/int_arith1.ML ~> src/HOL/Tools/int_arith.ML src/HOL/int_factor_simprocs.ML ~> src/HOL/Tools src/HOL/nat_simprocs.ML ~> src/HOL/Tools src/HOL/Real/float_arith.ML ~> src/HOL/Tools src/HOL/Real/float_syntax.ML ~> src/HOL/Tools src/HOL/Real/rat_arith.ML ~> src/HOL/Tools src/HOL/Real/real_arith.ML ~> src/HOL/Tools src/HOL/Library/Array.thy ~> src/HOL/Imperative_HOL src/HOL/Library/Heap_Monad.thy ~> src/HOL/Imperative_HOL src/HOL/Library/Heap.thy ~> src/HOL/Imperative_HOL src/HOL/Library/Imperative_HOL.thy ~> src/HOL/Imperative_HOL src/HOL/Library/Ref.thy ~> src/HOL/Imperative_HOL src/HOL/Library/Relational.thy ~> 

* If methods "eval" and "evaluation" encounter a structured proof state with !!/==>, only the conclusion is evaluated to True (if possible), avoiding strange error messages.

* Method "sizechange" automates termination proofs using (a modification of) the size-change principle.  Requires a SAT solver.  See src/HOL/ex/Termination.thy for examples.

* Simplifier: the simproc for let expressions now unfolds if the bound variable occurs at most once in the let expression body.  INCOMPATIBILITY.

* Method "arith": Linear arithmetic now ignores all inequalities when fast_arith_neq_limit is exceeded, instead of giving up entirely.

* New attribute "arith" for facts that should always be used automatically by arithmetic.  It is intended to be used locally in proofs, e.g.

    assumes [arith]: "x > 0"

Global usage is discouraged because of possible performance impact.

* New classes "top" and "bot" with corresponding operations "top" and "bot" in theory Orderings; instantiation of class "complete_lattice" requires instantiation of classes "top" and "bot".  INCOMPATIBILITY.

* Changed definition lemma "less_fun_def" in order to provide an instance for preorders on functions; use lemma "less_le" instead.  INCOMPATIBILITY.

* Theory Orderings: class "wellorder" moved here, with explicit induction rule "less_induct" as assumption.  For instantiation of "wellorder" by means of predicate "wf", use rule wf_wellorderI.  INCOMPATIBILITY.

* Theory Orderings: added class "preorder" as superclass of "order".  INCOMPATIBILITY: Instantiation proofs for order, linorder etc. slightly changed.  Some theorems named order_class.* are now named preorder_class.*.

* Theory Relation: renamed "refl" to "refl_on", "reflexive" to "refl", "diag" to "Id_on".

* Theory Finite_Set: added a new fold combinator of type

    ('a => 'b => 'b) => 'b => 'a set => 'b

Occasionally this is more convenient than the old fold combinator, which is now defined in terms of the new one and renamed to fold_image.

* Theories Ring_and_Field and OrderedGroup: The lemmas "group_simps" and "ring_simps" have been replaced by "algebra_simps" (which can be extended with further lemmas!).  At the moment both still exist but the former will disappear at some point.

* Theory Power: Lemma power_Suc is now declared as a simp rule in class recpower.  Type-specific simp rules for various recpower types have been removed.  INCOMPATIBILITY, rename old lemmas as follows:

    rat_power_0 -> power_0
    rat_power_Suc -> power_Suc
    realpow_0 -> power_0
    realpow_Suc -> power_Suc
    complexpow_0 -> power_0
    complexpow_Suc -> power_Suc
    power_poly_0 -> power_0
    power_poly_Suc -> power_Suc

* Theories Ring_and_Field and Divides: The definition of "op dvd" has been moved to the separate class dvd in Ring_and_Field; a couple of lemmas on dvd have been generalized to class comm_semiring_1.  Likewise a bunch of lemmas from Divides have been generalized from nat to class semiring_div.  INCOMPATIBILITY.  This involves the following theorem renames resulting from duplicate elimination:

    dvd_def_mod ~> dvd_eq_mod_eq_0
    zero_dvd_iff ~> dvd_0_left_iff
    dvd_0 ~> dvd_0_right
    DIVISION_BY_ZERO_DIV ~> div_by_0
    DIVISION_BY_ZERO_MOD ~> mod_by_0
    mult_div ~> div_mult_self2_is_id
    mult_mod ~> mod_mult_self2_is_0

* Theory IntDiv: removed many lemmas that are instances of class-based generalizations (from Divides and Ring_and_Field).
INCOMPATIBILITY, rename old lemmas as follows:

    dvd_diff -> nat_dvd_diff
    dvd_zminus_iff -> dvd_minus_iff
    mod_add1_eq -> mod_add_eq
    mod_mult1_eq -> mod_mult_right_eq
    mod_mult1_eq' -> mod_mult_left_eq
    mod_mult_distrib_mod -> mod_mult_eq
    nat_mod_add_left_eq -> mod_add_left_eq
    nat_mod_add_right_eq -> mod_add_right_eq
    nat_mod_div_trivial -> mod_div_trivial
    nat_mod_mod_trivial -> mod_mod_trivial
    zdiv_zadd_self1 -> div_add_self1
    zdiv_zadd_self2 -> div_add_self2
    zdiv_zmult_self1 -> div_mult_self2_is_id
    zdiv_zmult_self2 -> div_mult_self1_is_id
    zdvd_triv_left -> dvd_triv_left
    zdvd_triv_right -> dvd_triv_right
    zdvd_zmult_cancel_disj -> dvd_mult_cancel_left
    zmod_eq0_zdvd_iff -> dvd_eq_mod_eq_0[symmetric]
    zmod_zadd_left_eq -> mod_add_left_eq
    zmod_zadd_right_eq -> mod_add_right_eq
    zmod_zadd_self1 -> mod_add_self1
    zmod_zadd_self2 -> mod_add_self2
    zmod_zadd1_eq -> mod_add_eq
    zmod_zdiff1_eq -> mod_diff_eq
    zmod_zdvd_zmod -> mod_mod_cancel
    zmod_zmod_cancel -> mod_mod_cancel
    zmod_zmult_self1 -> mod_mult_self2_is_0
    zmod_zmult_self2 -> mod_mult_self1_is_0
    zmod_1 -> mod_by_1
    zdiv_1 -> div_by_1
    zdvd_abs1 -> abs_dvd_iff
    zdvd_abs2 -> dvd_abs_iff
    zdvd_refl -> dvd_refl
    zdvd_trans -> dvd_trans
    zdvd_zadd -> dvd_add
    zdvd_zdiff -> dvd_diff
    zdvd_zminus_iff -> dvd_minus_iff
    zdvd_zminus2_iff -> minus_dvd_iff
    zdvd_zmultD -> dvd_mult_right
    zdvd_zmultD2 -> dvd_mult_left
    zdvd_zmult_mono -> mult_dvd_mono
    zdvd_0_right -> dvd_0_right
    zdvd_0_left -> dvd_0_left_iff
    zdvd_1_left -> one_dvd
    zminus_dvd_iff -> minus_dvd_iff

* Theory Rational: 'Fract k 0' now equals '0'.  INCOMPATIBILITY.

* The real numbers offer decimal input syntax: 12.34 is translated into 1234/10^2.  This translation is not reversed upon output.

* Theory Library/Polynomial defines an abstract type 'a poly of univariate polynomials with coefficients of type 'a.  In addition to the standard ring operations, it also supports div and mod.  Code generation is also supported, using list-style constructors.

* Theory Library/Inner_Product defines a class real_inner of real inner product spaces, with an overloaded operation inner :: 'a => 'a => real.  Class real_inner is a subclass of real_normed_vector from theory RealVector.

* Theory Library/Product_Vector provides instances for the product type 'a * 'b of several classes from RealVector and Inner_Product.  Definitions of addition, subtraction, scalar multiplication, norms, and inner products are included.

* Theory Library/Bit defines the field "bit" of integers modulo 2.  In addition to the field operations, numerals and case syntax are also supported.

* Theory Library/Diagonalize provides a constructive version of Cantor's first diagonalization argument.

* Theory Library/GCD: Curried operations gcd, lcm (for nat) and zgcd, zlcm (for int); carried together from various gcd/lcm developments in the HOL distribution.  Constants zgcd and zlcm replace former igcd and ilcm; corresponding theorems renamed accordingly.  INCOMPATIBILITY, may recover tupled syntax as follows:

    hide (open) const gcd
    abbreviation gcd where
      "gcd == (%(a, b). GCD.gcd a b)"
    notation (output)
      GCD.gcd ("gcd '(_, _')")

The same works for lcm, zgcd, zlcm.

* Theory Library/Nat_Infinity: added addition, numeral syntax and more instantiations for algebraic structures.  Removed some duplicate theorems.  Changes in simp rules.  INCOMPATIBILITY.

* ML antiquotation @{code} takes a constant as argument and generates corresponding code in the background, inserting the name of the resulting ML value/function/datatype constructor binding in place.
All occurrences of @{code} within a single ML block are generated simultaneously.  Provides a generic and safe interface for instrumentalizing code generation.  See src/HOL/Decision_Procs/Ferrack.thy for a more ambitious application.  In the future you ought to refrain from ad-hoc compiling generated SML code on the ML toplevel.  Note that (for technical reasons) @{code} cannot refer to constants for which user-defined serializations are set.  Refer to the corresponding ML counterpart directly in such cases.

* Command 'rep_datatype': instead of theorem names the command now takes a list of terms denoting the constructors of the type to be represented as a datatype.  The characteristic theorems have to be proven.  INCOMPATIBILITY.  Also observe that the following theorems have disappeared in favour of existing ones:

    unit_induct ~> unit.induct
    prod_induct ~> prod.induct
    sum_induct ~> sum.induct
    Suc_Suc_eq ~> nat.inject
    Suc_not_Zero Zero_not_Suc ~> nat.distinct


*** HOL-Algebra ***

* New locales for orders and lattices where the equivalence relation is not restricted to equality.  INCOMPATIBILITY: all order and lattice locales use a record structure with field eq for the equivalence.

* New theory of factorial domains.

* Units_l_inv and Units_r_inv are now simp rules by default.  INCOMPATIBILITY.  Simplifier proofs that require deletion of l_inv and/or r_inv will now also require deletion of these lemmas.

* Renamed the following theorems, INCOMPATIBILITY:

    UpperD ~> Upper_memD
    LowerD ~> Lower_memD
    least_carrier ~> least_closed
    greatest_carrier ~> greatest_closed
    greatest_Lower_above ~> greatest_Lower_below
    one_zero ~> carrier_one_zero
    one_not_zero ~> carrier_one_not_zero (collision with assumption)


*** HOL-Nominal ***

* Nominal datatypes can now contain type variables.

* Commands 'nominal_inductive' and 'equivariance' work with local theory targets.

* Nominal primrec can now work with local theory targets and its specification syntax now conforms to the general format as seen in 'inductive' etc.

* Method "perm_simp" honours the standard simplifier attributes (no_asm), (no_asm_use) etc.

* The new predicate #* is defined like freshness, except that the left-hand side can be a set or list of atoms.

* Experimental command 'nominal_inductive2' derives strong induction principles for inductive definitions.  In contrast to 'nominal_inductive', which can only deal with a fixed number of binders, it can deal with arbitrary expressions standing for sets of atoms to be avoided.  The only inductive definition we have at the moment that needs this generalisation is the typing rule for Lets in the algorithm W:

    Gamma |- t1 : T1    (x,close Gamma T1)::Gamma |- t2 : T2    x#Gamma
    -------------------------------------------------------------------
                    Gamma |- Let x be t1 in t2 : T2

In this rule one wants to avoid all the binders that are introduced by "close Gamma T1".  We are looking for other examples where this feature might be useful.  Please let us know.


*** HOLCF ***

* Reimplemented the simplification procedure for proving continuity subgoals.  The new simproc is extensible; users can declare additional continuity introduction rules with the attribute [cont2cont].

* The continuity simproc now uses a different introduction rule for solving continuity subgoals on terms with lambda abstractions.  In some rare cases the new simproc may fail to solve subgoals that the old one could solve, and "simp add: cont2cont_LAM" may be necessary.  Potential INCOMPATIBILITY.

* Command 'fixrec': specification syntax now conforms to the general format as seen in 'inductive' etc.  See src/HOLCF/ex/Fixrec_ex.thy for examples.  INCOMPATIBILITY.


*** ZF ***

* Proof of Zorn's Lemma for partial orders.


*** ML ***

* Multithreading for Poly/ML 5.1/5.2 is no longer supported, only for Poly/ML 5.2.1 or later.  Important note: the TimeLimit facility depends on multithreading, so timeouts will not work before Poly/ML 5.2.1!

* High-level support for concurrent ML programming, see src/Pure/Concurrent.  The data-oriented model of "future values" is particularly convenient to organize independent functional computations.  The concept of "synchronized variables" provides a higher-order interface for components with shared state, avoiding the delicate details of mutexes and condition variables.  (Requires Poly/ML 5.2.1 or later.)

* ML bindings produced via Isar commands are stored within the Isar context (theory or proof).  Consequently, commands like 'use' and 'ML' become thread-safe and work with undo as expected (concerning top-level bindings, not side-effects on global references).  INCOMPATIBILITY, need to provide a proper Isar context when invoking the compiler at runtime; really global bindings need to be given outside a theory.  (Requires Poly/ML 5.2 or later.)

* Command 'ML_prf' is analogous to 'ML' but works within a proof context.  Top-level ML bindings are stored within the proof context in a purely sequential fashion, disregarding the nested proof structure.  ML bindings introduced by 'ML_prf' are discarded at the end of the proof.  (Requires Poly/ML 5.2 or later.)

* Simplified ML attribute and method setup, cf. functions Attrib.setup and Method.setup, as well as Isar commands 'attribute_setup' and 'method_setup' (a small example appears below).  INCOMPATIBILITY for 'method_setup', need to simplify existing code accordingly, or use plain 'setup' together with old Method.add_method.

* Simplified ML oracle interface Thm.add_oracle promotes 'a -> cterm to 'a -> thm, while results are always tagged with an authentic oracle name.  The Isar command 'oracle' is now polymorphic, no argument type is specified.  INCOMPATIBILITY, need to simplify existing oracle code accordingly.  Note that extra performance may be gained by producing the cterm carefully, avoiding slow Thm.cterm_of.

* Simplified interface for defining document antiquotations via ThyOutput.antiquotation, ThyOutput.output, and optionally ThyOutput.maybe_pretty_source.  INCOMPATIBILITY, need to simplify user antiquotations accordingly, see src/Pure/Thy/thy_output.ML for common examples.

* More systematic treatment of long names, abstract name bindings, and name space operations.  Basic operations on qualified names have been moved from structure NameSpace to Long_Name, e.g. Long_Name.base_name, Long_Name.append.  The old type bstring has been mostly replaced by the abstract type binding (see structure Binding), which supports precise qualification by packages and local theory targets, as well as proper tracking of source positions.  INCOMPATIBILITY, need to wrap old bstring values into Binding.name, or better pass through abstract bindings everywhere.  See further src/Pure/General/long_name.ML, src/Pure/General/binding.ML and src/Pure/General/name_space.ML

* Result facts (from PureThy.note_thms, ProofContext.note_thms, LocalTheory.note etc.) now refer to the *full* internal name, not the bstring as before.  INCOMPATIBILITY, not detected by ML type-checking!
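
The simplified method setup mentioned above might be used like this -- a minimal sketch, close to the example in the isar-ref manual (the method name is hypothetical):

    method_setup my_simp = {*
      Scan.succeed (fn ctxt =>
        SIMPLE_METHOD' (asm_full_simp_tac (simpset_of ctxt)))
    *} "rewrite subgoal by current simp rules"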

* Disposed old type and term read functions (Sign.read_def_typ, Sign.read_typ, Sign.read_def_terms, Sign.read_term, Thm.read_def_cterms, Thm.read_cterm etc.).  INCOMPATIBILITY, should use regular Syntax.read_typ, Syntax.read_term, Syntax.read_typ_global, Syntax.read_term_global etc.; see also OldGoals.read_term as last resort for legacy applications.

* Disposed old declarations, tactics, tactic combinators that refer to the simpset or claset of an implicit theory (such as Addsimps, Simp_tac, SIMPSET).  INCOMPATIBILITY, should use @{simpset} etc. in embedded ML text, or local_simpset_of with a proper context passed as explicit runtime argument.

* Rules and tactics that read instantiations (read_instantiate, res_inst_tac, thin_tac, subgoal_tac etc.) now demand a proper proof context, which is required for parsing and type-checking.  Moreover, the variables are specified as plain indexnames, not string encodings thereof.  INCOMPATIBILITY.

* Generic Toplevel.add_hook interface allows to analyze the result of transactions.  E.g. see src/Pure/ProofGeneral/proof_general_pgip.ML for theorem dependency output of transactions resulting in a new theory state.

* ML antiquotations: block-structured compilation context indicated by \<lbrace> ... \<rbrace>; additional antiquotation forms:

    @{binding name}                         - basic name binding
    @{let ?pat = term}                      - term abbreviation (HO matching)
    @{note name = fact}                     - fact abbreviation
    @{thm fact}                             - singleton fact (with attributes)
    @{thms fact}                            - general fact (with attributes)
    @{lemma prop by method}                 - singleton goal
    @{lemma prop by meth1 meth2}            - singleton goal
    @{lemma prop1 ... propN by method}      - general goal
    @{lemma prop1 ... propN by meth1 meth2} - general goal
    @{lemma (open) ...}                     - open derivation


*** System ***

* The Isabelle "emacs" tool provides a specific interface to invoke Proof General / Emacs, with more explicit failure if that is not installed (the old isabelle-interface script silently falls back on isabelle-process).  The PROOFGENERAL_HOME setting determines the installation location of the Proof General distribution.

* Isabelle/lib/classes/Pure.jar provides basic support to integrate the Isabelle process into a JVM/Scala application.  See Isabelle/lib/jedit/plugin for a minimal example.  (The obsolete Java process wrapper has been discontinued.)

* Added homegrown Isabelle font with unicode layout, see lib/fonts.

* Various status messages (with exact source position information) are emitted, if proper markup print mode is enabled.  This allows user-interface components to provide detailed feedback on internal prover operations.



New in Isabelle2008 (June 2008)
-------------------------------

*** General ***

* The Isabelle/Isar Reference Manual (isar-ref) has been reorganized and updated, with formally checked references as hyperlinks.

* Theory loader: use_thy (and similar operations) no longer set the implicit ML context, which was occasionally hard to predict and in conflict with concurrency.  INCOMPATIBILITY, use ML within Isar which provides a proper context already.

* Theory loader: old-style ML proof scripts being *attached* to a thy file are no longer supported.  INCOMPATIBILITY, regular 'uses' and 'use' within a theory file will do the job.

* Name space merge now observes canonical order, i.e. the second space is inserted into the first one, while existing entries in the first space take precedence.  INCOMPATIBILITY in rare situations, may try to swap theory imports.

* Syntax: symbol \<chi> is now considered a letter.  Potential INCOMPATIBILITY in identifier syntax etc.

* Outer syntax: string tokens no longer admit escaped white space, which was an accidental (undocumented) feature.  INCOMPATIBILITY, use white space without escapes.

* Outer syntax: string tokens may contain arbitrary character codes specified via 3 decimal digits (as in SML).  E.g. "foo\095bar" for "foo_bar".


*** Pure ***

* Context-dependent token translations.  Default setup reverts locally fixed variables, and adds hilite markup for undeclared frees.

* Unused theorems can be found using the new command 'unused_thms'.  There are three ways of invoking it:

  (1) unused_thms
      Only finds unused theorems in the current theory.

  (2) unused_thms thy_1 ... thy_n -
      Finds unused theorems in the current theory and all of its ancestors, excluding the theories thy_1 ... thy_n and all of their ancestors.

  (3) unused_thms thy_1 ... thy_n - thy'_1 ... thy'_m
      Finds unused theorems in the theories thy'_1 ... thy'_m and all of their ancestors, excluding the theories thy_1 ... thy_n and all of their ancestors.

In order to increase the readability of the list produced by unused_thms, theorems that have been created by a particular instance of a theory command such as 'inductive' or 'function' are considered to belong to the same "group", meaning that if at least one theorem in this group is used, the other theorems in the same group are no longer reported as unused.  Moreover, if all theorems in the group are unused, only one theorem in the group is displayed.

Note that proof objects have to be switched on in order for unused_thms to work properly (i.e. !proofs must be >= 1, which is usually the case when using Proof General with the default settings).

* Authentic naming of facts disallows ad-hoc overwriting of previous theorems within the same name space.  INCOMPATIBILITY, need to remove duplicate fact bindings, or even accidental fact duplications.  Note that tools may maintain dynamically scoped facts systematically, using PureThy.add_thms_dynamic.

* Command 'hide' now allows to hide from the "fact" name space as well.

* Eliminated destructive theorem database, simpset, claset, and clasimpset.  Potential INCOMPATIBILITY, really need to observe linear update of theories within ML code.

* Eliminated theories ProtoPure and CPure, leaving just one Pure theory.  INCOMPATIBILITY, object-logics depending on former Pure require additional setup PureThy.old_appl_syntax_setup; object-logics depending on former CPure need to refer to Pure.

* Commands 'use' and 'ML' are now purely functional, operating on theory/local_theory.  Removed former 'ML_setup' (on theory), use 'ML' instead.  Added 'ML_val' as mere diagnostic replacement for 'ML'.  INCOMPATIBILITY.

* Command 'setup': discontinued implicit version with ML reference.

* Instantiation target allows for simultaneous specification of class instance operations together with an instantiation proof.  The type-checking phase allows to refer to class operations uniformly.  See src/HOL/Complex/Complex.thy for an Isar example and src/HOL/Library/Eval.thy for an ML example.  (A minimal sketch appears below.)

* Indexing of literal facts: be more serious about including only facts from the visible specification/proof context, but not the background context (locale etc.).  Affects `prop` notation and method "fact".  INCOMPATIBILITY: need to name facts explicitly in rare situations.

* Methods "cases", "induct", "coinduct": removed obsolete/undocumented "(open)" option, which used to expose internal bound variables to the proof text.

* Isar statements: removed obsolete case "rule_context".  INCOMPATIBILITY, better use explicit fixes/assumes.
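
The instantiation target mentioned above, in a minimal sketch (the datatype foo is hypothetical; class zero is from HOL):

    datatype foo = Foo

    instantiation foo :: zero
    begin

    definition zero_foo_def: "(0::foo) = Foo"

    instance ..

    end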

* Locale proofs: the default proof step now includes 'unfold_locales'; hence 'proof' without argument may be used to unfold locale predicates.


*** Document preparation ***

* Simplified pdfsetup.sty: color/hyperref is used unconditionally for both pdf and dvi (hyperlinks usually work in xdvi as well); removed obsolete thumbpdf setup (contemporary PDF viewers do this on the spot); renamed link color from "darkblue" to "linkcolor" (default value unchanged, can be redefined via \definecolor); no longer sets "a4paper" option (unnecessary or even intrusive).

* Antiquotation @{lemma A method} proves proposition A by the given method (either a method name or a method name plus (optional) method arguments in parentheses) and prints A just like @{prop A}.


*** HOL ***

* New primrec package.  Specification syntax conforms in style to definition/function/....  No separate induction rule is provided.  The "primrec" command distinguishes old-style and new-style specifications by syntax.  The former primrec package is now named OldPrimrecPackage.  When adjusting theories, beware: constants stemming from new-style primrec specifications have authentic syntax.

* Metis prover is now an order of magnitude faster, and also works with multithreading.

* Metis: the maximum number of clauses that can be produced from a theorem is now given by the attribute max_clauses.  Theorems that exceed this number are ignored, with a warning printed.

* Sledgehammer no longer produces structured proofs by default.  To enable, declare [[sledgehammer_full = true]].  Attributes reconstruction_modulus, reconstruction_sorts renamed sledgehammer_modulus, sledgehammer_sorts.  INCOMPATIBILITY.

* Method "induct_scheme" derives user-specified induction rules from well-founded induction and completeness of patterns.  This factors out some operations that are done internally by the function package and makes them available separately.  See src/HOL/ex/Induction_Scheme.thy for examples.

* More flexible generation of measure functions for termination proofs: Measure functions can be declared by proving a rule of the form "is_measure f" and giving it the [measure_function] attribute.  The "is_measure" predicate is logically meaningless (always true), and just guides the heuristic.  To find suitable measure functions, the termination prover sets up the goal "is_measure ?f" of the appropriate type and generates all solutions by Prolog-style backward proof using the declared rules.  This setup also deals with rules like

    "is_measure f ==> is_measure (list_size f)"

which accommodate nested datatypes that recurse through lists.  Similar rules are predeclared for products and option types.

* Turned the type of sets "'a set" into an abbreviation for "'a => bool".  INCOMPATIBILITIES:

  - Definitions of overloaded constants on sets have to be replaced by definitions on => and bool.

  - Some definitions of overloaded operators on sets can now be proved using the definitions of the operators on => and bool.  Therefore, the following theorems have been renamed:

      subset_def -> subset_eq
      psubset_def -> psubset_eq
      set_diff_def -> set_diff_eq
      Compl_def -> Compl_eq
      Sup_set_def -> Sup_set_eq
      Inf_set_def -> Inf_set_eq
      sup_set_def -> sup_set_eq
      inf_set_def -> inf_set_eq

  - Due to the incompleteness of the HO unification algorithm, some rules such as subst may require manual instantiation, if some of the unknowns in the rule is a set.

  - Higher order unification and forward proofs: The proof pattern

      have "P (S::'a set)" <...>
      then have "EX S. P S" ..

    no longer works (due to the incompleteness of the HO unification algorithm) and must be replaced by the pattern

      have "EX S. P S"
      proof
        show "P S" <...>
      qed

  - Calculational reasoning with subst (or similar rules): The proof pattern

      have "P (S::'a set)" <...>
      also have "S = T" <...>
      finally have "P T" .

    no longer works (for similar reasons as the previous example) and must be replaced by something like

      have "P (S::'a set)" <...>
      moreover have "S = T" <...>
      ultimately have "P T" by simp

  - Tactics or packages written in ML code: Code performing pattern matching on types via

      Type ("set", [T]) => ...

    must be rewritten (see the ML sketch below).  Moreover, functions like strip_type or binder_types no longer return the right value when applied to a type of the form

      T1 => ... => Tn => U => bool

    rather than

      T1 => ... => Tn => U set

* Merged theories Wellfounded_Recursion, Accessible_Part and Wellfounded_Relations to theory Wellfounded.

* Explicit class "eq" for executable equality.  INCOMPATIBILITY.

* Class finite no longer treats UNIV as class parameter.  Use class enum from theory Library/Enum instead to achieve a similar effect.  INCOMPATIBILITY.

* Theory List: rule list_induct2 now has explicitly named cases "Nil" and "Cons".  INCOMPATIBILITY.

* HOL (and FOL): renamed variables in rules imp_elim and swap.  Potential INCOMPATIBILITY.

* Theory Product_Type: duplicated lemmas split_Pair_apply and injective_fst_snd removed, use split_eta and prod_eqI instead.  Renamed upd_fst to apfst and upd_snd to apsnd.  INCOMPATIBILITY.

* Theory Nat: removed redundant lemmas that merely duplicate lemmas of the same name in theory Orderings:

    less_trans
    less_linear
    le_imp_less_or_eq
    le_less_trans
    less_le_trans
    less_not_sym
    less_asym

Renamed less_imp_le to less_imp_le_nat, and less_irrefl to less_irrefl_nat.  Potential INCOMPATIBILITY due to more general types and different variable names.

* Library/Option_ord.thy: Canonical order on option type.

* Library/RBT.thy: Red-black trees, an efficient implementation of finite maps.

* Library/Countable.thy: Type class for countable types.

* Theory Int: The representation of numerals has changed.  The infix operator BIT and the bit datatype with constructors B0 and B1 have disappeared.  INCOMPATIBILITY, use "Int.Bit0 x" and "Int.Bit1 y" in place of "x BIT bit.B0" and "y BIT bit.B1", respectively.  Theorems involving BIT, B0, or B1 have been renamed with "Bit0" or "Bit1" accordingly.

* Theory Nat: the definition of <= and < on natural numbers no longer depends on well-founded relations.  INCOMPATIBILITY.  Definitions le_def and less_def have disappeared.  Consider lemmas not_less [symmetric, where ?'a = nat] and less_eq [symmetric] instead.

* Theory Finite_Set: locales ACf, ACe, ACIf, ACIfSL and ACIfSLlin (whose purpose mainly is for various fold_set functionals) have been abandoned in favor of the existing algebraic classes ab_semigroup_mult, comm_monoid_mult, ab_semigroup_idem_mult, lower_semilattice (resp. upper_semilattice) and linorder.  INCOMPATIBILITY.

* Theory Transitive_Closure: induct and cases rules now declare proper case_names ("base" and "step").  INCOMPATIBILITY.

* Theorem Inductive.lfp_ordinal_induct generalized to complete lattices.  The former set-specific version is available as Inductive.lfp_ordinal_induct_set.

* Renamed theorems "power.simps" to "power_int.simps".  INCOMPATIBILITY.

* Class semiring_div provides basic abstract properties of semirings with division and modulo operations.  Subsumes former class dvd_mod.

* Merged theories IntDef, Numeral and IntArith into unified theory Int.  INCOMPATIBILITY.
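
The set-related ML rewrite mentioned above may look like this (a sketch with a hypothetical helper function):

    (*before the change: sets were a separate type constructor*)
    fun dest_setT (Type ("set", [T])) = T
      | dest_setT T = raise TYPE ("dest_setT", [T], []);

    (*after the change: 'a set abbreviates 'a => bool*)
    fun dest_setT (Type ("fun", [T, Type ("bool", [])])) = T
      | dest_setT T = raise TYPE ("dest_setT", [T], []);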

* Theory Library/Code_Index: type "index" now represents natural numbers rather than integers.  INCOMPATIBILITY.

* New class "uminus" with operation "uminus" (split off from class "minus", which now only has operation "minus", binary).  INCOMPATIBILITY.

* Constants "card", "internal_split", "option_map" now with authentic syntax.  INCOMPATIBILITY.

* Definitions subset_def, psubset_def, set_diff_def, Compl_def, le_bool_def, less_bool_def, le_fun_def, less_fun_def, inf_bool_def, sup_bool_def, Inf_bool_def, Sup_bool_def, inf_fun_def, sup_fun_def, Inf_fun_def, Sup_fun_def, inf_set_def, sup_set_def, Inf_set_def, Sup_set_def, le_def, less_def, option_map_def now with object equality.  INCOMPATIBILITY.

* Records.  Removed K_record, and replaced it by the pure lambda term %x. c.  The simplifier setup is now more robust against eta expansion.  INCOMPATIBILITY: in cases explicitly referring to K_record.

* Library/Multiset: {#a, b, c#} abbreviates {#a#} + {#b#} + {#c#}.

* Library/ListVector: new theory of arithmetic vector operations.

* Library/Order_Relation: new theory of various orderings as sets of pairs.  Defines preorders, partial orders, linear orders and well-orders on sets and on types.


*** ZF ***

* Renamed some theories to allow loading both ZF and HOL in the same session:

    Datatype -> Datatype_ZF
    Inductive -> Inductive_ZF
    Int -> Int_ZF
    IntDiv -> IntDiv_ZF
    Nat -> Nat_ZF
    List -> List_ZF
    Main -> Main_ZF

INCOMPATIBILITY: ZF theories that import individual theories below Main might need to be adapted.  Regular theory Main is still available, as a trivial extension of Main_ZF.


*** ML ***

* ML within Isar: antiquotation @{const name} or @{const name(typargs)} produces a statically-checked Const term.

* Functor NamedThmsFun: data is available to the user as a dynamic fact (of the same name).  Removed obsolete print command.

* Removed obsolete "use_legacy_bindings" function.

* The ``print mode'' is now a thread-local value derived from a global template (the former print_mode reference), thus access becomes non-critical.  The global print_mode reference is for session management only; user-code should use print_mode_value, print_mode_active, PrintMode.setmp etc. (see the sketch below).  INCOMPATIBILITY.

* Functions system/system_out provide a robust way to invoke external shell commands, with propagation of interrupts (requires Poly/ML 5.2.1).  Do not use OS.Process.system etc. from the basis library!


*** System ***

* Default settings: PROOFGENERAL_OPTIONS no longer impose xemacs --- in accordance with Proof General 3.7, which prefers GNU emacs.

* isatool tty runs the Isabelle process with plain tty interaction; an optional line editor may be specified via the ISABELLE_LINE_EDITOR setting, the default settings attempt to locate "ledit" and "rlwrap".

* isatool browser now works with Cygwin as well, using the general "javapath" function defined in the Isabelle process environment.

* YXML notation provides a simple and efficient alternative to standard XML transfer syntax.  See src/Pure/General/yxml.ML and isatool yxml as described in the Isabelle system manual.

* JVM class isabelle.IsabelleProcess (located in Isabelle/lib/classes) provides a general wrapper for managing an Isabelle process in a robust fashion, with ``cooked'' output from stdin/stderr.

* Rudimentary Isabelle plugin for jEdit (see Isabelle/lib/jedit), based on the Isabelle/JVM process wrapper (see Isabelle/lib/classes).

* Removed obsolete THIS_IS_ISABELLE_BUILD feature.  NB: the documented way of changing the user's settings is via ISABELLE_HOME_USER/etc/settings, which is a fully featured bash script.
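
Referring to the ``print mode'' entry above: user code may evaluate a printing function with all special print modes disabled like this (a minimal sketch; the helper name is hypothetical and Syntax.string_of_term is an assumed interface):

    ML {*
      (*render a term with the empty list of print modes in effect*)
      fun plain_string_of_term ctxt t =
        PrintMode.setmp [] (Syntax.string_of_term ctxt) t;
    *}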

* Multithreading.max_threads := 0 refers to the number of actual CPU cores of the underlying machine, which is a good starting point for optimal performance tuning.  The corresponding usedir option -M allows "max" as an alias for "0".  WARNING: does not work on certain versions of Mac OS (with Poly/ML 5.1).

* isabelle-process: non-ML sessions are run with "nice", to reduce the adverse effect of Isabelle flooding interactive front-ends (notably ProofGeneral / XEmacs).



New in Isabelle2007 (November 2007)
-----------------------------------

*** General ***

* More uniform information about legacy features, notably a warning/error of "Legacy feature: ...", depending on the state of the tolerate_legacy_features flag (default true).  FUTURE INCOMPATIBILITY: legacy features will disappear eventually.

* Theory syntax: the header format ``theory A = B + C:'' has been discontinued in favour of ``theory A imports B C begin''.  Use isatool fixheaders to convert existing theory files.  INCOMPATIBILITY.

* Theory syntax: the old non-Isar theory file format has been discontinued altogether.  Note that ML proof scripts may still be used with Isar theories; migration is usually quite simple with the ML function use_legacy_bindings.  INCOMPATIBILITY.

* Theory syntax: some popular names (e.g. 'class', 'declaration', 'fun', 'help', 'if') are now keywords.  INCOMPATIBILITY, use double quotes.

* Theory loader: be more serious about observing the static theory header specifications (including optional directories), but not the accidental file locations of previously successful loads.  The strict update policy of former update_thy is now already performed by use_thy, so the former has been removed; use_thys updates several theories simultaneously, just as 'imports' within a theory header specification, but without merging the results.  Potential INCOMPATIBILITY: may need to refine theory headers and commands ROOT.ML which depend on load order.

* Theory loader: optional support for content-based file identification, instead of the traditional scheme of full physical path plus date stamp; configured by the ISABELLE_FILE_IDENT setting (cf. the system manual).  The new scheme allows to work with non-finished theories in persistent session images, such that source files may be moved later on without requiring reloads.

* Theory loader: old-style ML proof scripts being *attached* to a thy file (with the same base name as the theory) are considered a legacy feature, which will disappear eventually.  Even now, the theory loader no longer maintains dependencies on such files.

* Syntax: the scope for resolving ambiguities via type-inference is now limited to individual terms, instead of whole simultaneous specifications as before.  This greatly reduces the complexity of the syntax module and improves flexibility by separating parsing and type-checking.  INCOMPATIBILITY: additional type-constraints (explicit 'fixes' etc.) are required in rare situations.

* Syntax: constants introduced by new-style packages ('definition', 'abbreviation' etc.) are passed through the syntax module in ``authentic mode''.  This means that associated mixfix annotations really stick to such constants, independently of potential name space ambiguities introduced later on.  INCOMPATIBILITY: constants in parse trees are represented slightly differently, may need to adapt syntax translations accordingly.  Use CONST marker in 'translations' and @{const_syntax} antiquotation in 'parse_translation' etc.
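
For illustration, a syntax translation referring to the internal constant via CONST (a minimal sketch with hypothetical notation; Pair is the HOL pair constructor):

    syntax
      "_swap" :: "'a => 'b => 'b * 'a"    ("SWAP '(_, _')")
    translations
      "SWAP(x, y)" == "CONST Pair y x"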

* Legacy goal package: reduced interface to the bare minimum required to keep existing proof scripts running.  Most other user-level functions are now part of the OldGoals structure, which is *not* open by default (consider isatool expandshort before open OldGoals).  Removed top_sg, prin, printyp, pprint_term/typ altogether, because these tend to cause confusion about the actual goal (!) context being used here, which is not necessarily the same as the_context().

* Command 'find_theorems': supports "*" wild-card in "name:" criterion; "with_dups" option.  Certain ProofGeneral versions might support a specific search form (see ProofGeneral/CHANGES).

* The ``prems limit'' option (cf. ProofContext.prems_limit) is now -1 by default, which means that "prems" (and also "fixed variables") are suppressed from proof state output.  Note that the ProofGeneral settings mechanism allows to change and save options persistently, but older versions of Isabelle will fail to start up if a negative prems limit is imposed.

* Local theory targets may be specified by non-nested blocks of ``context/locale/class ... begin'' followed by ``end''.  The body may contain definitions, theorems etc., including any derived mechanism that has been implemented on top of these primitives.  This concept generalizes the existing ``theorem (in ...)'' towards more versatility and scalability.

* Proof General interface: proper undo of final 'end' command; discontinued Isabelle/classic mode (ML proof scripts).


*** Document preparation ***

* Added antiquotation @{theory name} which prints the given name, after checking that it refers to a valid ancestor theory in the current context.

* Added antiquotations @{ML_type text} and @{ML_struct text} which check the given source text as ML type/structure, printing verbatim.

* Added antiquotation @{abbrev "c args"} which prints the abbreviation "c args == rhs" given in the current context.  (Any number of arguments may be given on the LHS.)


*** Pure ***

* The 'class' package offers a combination of axclass and locale to achieve Haskell-like type classes in Isabelle.  Definitions and theorems within a class context produce both relative results (with implicit parameters according to the locale context), and polymorphic constants with qualified polymorphism (according to the class context).  Within the body context of a 'class' target, a separate syntax layer ("user space type system") takes care of converting between global polymorphic consts and internal locale representation.  See src/HOL/ex/Classpackage.thy for examples (as well as main HOL).  "isatool doc classes" provides a tutorial.

* Generic code generator framework allows to generate executable code for ML and Haskell (including Isabelle classes).  A short usage sketch:

    internal compilation:
        export_code <list of constants (term syntax)> in SML
    writing SML code to a file:
        export_code <list of constants (term syntax)> in SML <filename>
    writing OCaml code to a file:
        export_code <list of constants (term syntax)> in OCaml <filename>
    writing Haskell code to a bunch of files:
        export_code <list of constants (term syntax)> in Haskell <filename>
    evaluating closed propositions to True/False using code generation:
        method ``eval''

Reasonable default setup of framework in HOL.

Theorem attributes for selecting and transforming function equation theorems:

    [code fun]:        select a theorem as function equation for a specific constant
    [code fun del]:    deselect a theorem as function equation for a specific constant
    [code inline]:     select an equation theorem for unfolding (inlining) in place
    [code inline del]: deselect an equation theorem for unfolding (inlining) in place

User-defined serializations (target in {SML, OCaml, Haskell}):

    code_const <and-list of constants (term syntax)>
      {(target) <and-list of const target syntax>}+

    code_type <and-list of type constructors>
      {(target) <and-list of type target syntax>}+

    code_instance <and-list of instances>
      {(target)}+
        where instance ::= <type constructor> :: <class>

    code_class <and-list of classes>
      {(target) <and-list of class target syntax>}+
        where class target syntax ::= <class name> {where {<classop> == <target syntax>}+}?

code_instance and code_class are only effective for target Haskell.

For example usage see src/HOL/ex/Codegenerator.thy and src/HOL/ex/Codegenerator_Pretty.thy.  A separate tutorial on code generation from Isabelle/HOL theories is available via "isatool doc codegen".

* Code generator: consts in 'consts_code' Isar commands are now referred to by usual term syntax (including optional type annotations).

* Command 'no_translations' removes translation rules from theory syntax.

* Overloaded definitions are now actually checked for acyclic dependencies.  The overloading scheme is slightly more general than that of Haskell98, although Isabelle does not demand an exact correspondence to type class and instance declarations.  INCOMPATIBILITY, use ``defs (unchecked overloaded)'' to admit more exotic versions of overloading -- at the discretion of the user!

Polymorphic constants are represented via type arguments, i.e. the instantiation that matches an instance against the most general declaration given in the signature.  For example, with the declaration c :: 'a => 'a => 'a, an instance c :: nat => nat => nat is represented as c(nat).  Overloading is essentially simultaneous structural recursion over such type arguments.  Incomplete specification patterns impose global constraints on all occurrences, e.g. c('a * 'a) on the LHS means that more general c('a * 'b) will be disallowed on any RHS.  Command 'print_theory' outputs the normalized system of recursive equations, see section "definitions".

* Configuration options are maintained within the theory or proof context (with name and type bool/int/string), providing a very simple interface to a poor-man's version of general context data.  Tools may declare options in ML (e.g. using Attrib.config_int) and then refer to these values using Config.get etc.  Users may change options via an associated attribute of the same name.  This form of context declaration works particularly well with commands 'declare' or 'using', for example ``declare [[foo = 42]]''.  Thus it has become very easy to avoid global references, which would not observe Isar toplevel undo/redo and fail to work with multithreading.

Various global ML references of Pure and HOL have been turned into configuration options:

    Unify.search_bound           unify_search_bound
    Unify.trace_bound            unify_trace_bound
    Unify.trace_simp             unify_trace_simp
    Unify.trace_types            unify_trace_types
    Simplifier.simp_depth_limit  simp_depth_limit
    Blast.depth_limit            blast_depth_limit
    DatatypeProp.dtK             datatype_distinctness_limit
    fast_arith_neq_limit         fast_arith_neq_limit
    fast_arith_split_limit       fast_arith_split_limit

* Named collections of theorems may be easily installed as context data using the functor NamedThmsFun (see also src/Pure/Tools/named_thms.ML).  The user may add or delete facts via attributes; there is also a toplevel print command.
This facility is just a common case of general context data, which is the preferred way for anything more complex than just a list of facts in canonical order.

* Isar: command 'declaration' augments a local theory by generic declaration functions written in ML.  This enables arbitrary content being added to the context, depending on a morphism that tells the difference of the original declaration context wrt. the application context encountered later on.

* Isar: proper interfaces for simplification procedures.  Command 'simproc_setup' declares named simprocs (with match patterns, and body text in ML).  Attribute "simproc" adds/deletes simprocs in the current context.  ML antiquotation @{simproc name} retrieves named simprocs.  (A small example appears below.)

* Isar: an extra pair of brackets around attribute declarations abbreviates a theorem reference involving an internal dummy fact, which will be ignored later --- only the effect of the attribute on the background context will persist.  This form of in-place declarations is particularly useful with commands like 'declare' and 'using', for example ``have A using [[simproc a]] by simp''.

* Isar: method "assumption" (and implicit closing of subproofs) now takes simple non-atomic goal assumptions into account: after applying an assumption as a rule the resulting subgoals are solved by atomic assumption steps.  This is particularly useful to finish 'obtain' goals, such as "!!x. (!!x. P x ==> thesis) ==> P x ==> thesis", without referring to the original premise "!!x. P x ==> thesis" in the Isar proof context.  POTENTIAL INCOMPATIBILITY: method "assumption" is more permissive.

* Isar: implicit use of prems from the Isar proof context is considered a legacy feature.  Common applications like ``have A .'' may be replaced by ``have A by fact'' or ``note `A`''.  In general, referencing facts explicitly here improves readability and maintainability of proof texts.

* Isar: improper proof element 'guess' is like 'obtain', but derives the obtained context from the course of reasoning!  For example:

    assume "EX x y. A x & B y"  -- "any previous fact"
    then guess x and y by clarify

This technique is potentially adventurous, depending on the facts and proof tools being involved here.

* Isar: known facts from the proof context may be specified as literal propositions, using ASCII back-quote syntax.  This works wherever named facts used to be allowed so far, in proof commands, proof methods, attributes etc.  Literal facts are retrieved from the context according to unification of type and term parameters.  For example, provided that "A" and "A ==> B" and "!!x. P x ==> Q x" are known theorems in the current context, then these are valid literal facts: `A` and `A ==> B` and `!!x. P x ==> Q x` as well as `P a ==> Q a` etc.

There is also a proof method "fact" which does the same composition for explicit goal states, e.g. the following proof texts coincide with certain special cases of literal facts:

    have "A" by fact                 ==  note `A`
    have "A ==> B" by fact           ==  note `A ==> B`
    have "!!x. P x ==> Q x" by fact  ==  note `!!x. P x ==> Q x`
    have "P a ==> Q a" by fact       ==  note `P a ==> Q a`

* Isar: ":" (colon) is no longer a symbolic identifier character in outer syntax.  Thus symbolic identifiers may be used without additional white space in declarations like this: ``assume *: A''.

* Isar: 'print_facts' prints all local facts of the current context, both named and unnamed ones.
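
The 'simproc_setup' command mentioned above, in the canonical form of the documentation's unit example (rewriting any term of type unit to ()):

    simproc_setup unit ("x::unit") = {*
      fn _ => fn _ => fn ct =>
        if HOLogic.is_unit (term_of ct) then NONE
        else SOME (mk_meta_eq @{thm unit_eq})
    *}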
* Isar: 'def' now admits simultaneous definitions, e.g.:

  def x == "t" and y == "u"

* Isar: added command 'unfolding', which is structurally similar to 'using', but affects both the goal state and facts by unfolding given rewrite rules.  Thus many occurrences of the 'unfold' method or 'unfolded' attribute may be replaced by first-class proof text.

* Isar: methods 'unfold' / 'fold', attributes 'unfolded' / 'folded', and command 'unfolding' now all support object-level equalities (potentially conditional).  The underlying notion of rewrite rule is analogous to the 'rule_format' attribute, but *not* that of the Simplifier (which is usually more generous).

* Isar: the new attribute [rotated n] (default n = 1) rotates the premises of a theorem by n.  Useful in conjunction with drule.

* Isar: the goal restriction operator [N] (default N = 1) evaluates a method expression within a sandbox consisting of the first N sub-goals, which need to exist.  For example, ``simp_all [3]'' simplifies the first three sub-goals, while (rule foo, simp_all)[] simplifies all new goals that emerge from applying rule foo to the originally first one.

* Isar: schematic goals are no longer restricted to higher-order patterns; e.g. ``lemma "?P(?x)" by (rule TrueI)'' now works as expected.

* Isar: the conclusion of a long theorem statement is now either 'shows' (a simultaneous conjunction, as before), or 'obtains' (essentially a disjunction of cases with local parameters and assumptions).  The latter makes it possible to express general elimination rules adequately; in this notation common elimination rules look like this:

  lemma exE: -- "EX x. P x ==> (!!x. P x ==> thesis) ==> thesis"
    assumes "EX x. P x"
    obtains x where "P x"

  lemma conjE: -- "A & B ==> (A ==> B ==> thesis) ==> thesis"
    assumes "A & B"
    obtains A and B

  lemma disjE: -- "A | B ==> (A ==> thesis) ==> (B ==> thesis) ==> thesis"
    assumes "A | B"
    obtains A | B

The subsequent classical rules even refer to the formal "thesis" explicitly:

  lemma classical: -- "(~ thesis ==> thesis) ==> thesis"
    obtains "~ thesis"

  lemma Peirce's_Law: -- "((thesis ==> something) ==> thesis) ==> thesis"
    obtains "thesis ==> something"

The actual proof of an 'obtains' statement is analogous to that of the Isar proof element 'obtain', only that there may be several cases.  Optional case names may be specified in parentheses; these will be available both in the present proof and as annotations in the resulting rule, for later use with the 'cases' method (cf. attribute case_names).

* Isar: the assumptions of a long theorem statement are available as "assms" fact in the proof context.  This is more appropriate than the (historical) "prems", which refers to all assumptions of the current context, including those from the target locale, proof body etc.

* Isar: 'print_statement' prints theorems from the current theory or proof context in long statement form, according to the syntax of a top-level lemma.

* Isar: 'obtain' takes an optional case name for the local context introduction rule (default "that").

* Isar: removed obsolete 'concl is' patterns.  INCOMPATIBILITY, use explicit (is "_ ==> ?foo") in the rare cases where this still happens to occur.

* Pure: syntax "CONST name" produces a fully internalized constant according to the current context.  This is particularly useful for syntax translations that should refer to internal constant representations independently of name spaces.

* Pure: syntax constant for foo (binder "FOO ") is called "foo_binder" instead of "FOO ".
This allows multiple binder declarations to coexist in the same context.  INCOMPATIBILITY.

* Isar/locales: 'notation' provides a robust interface to the 'syntax' primitive that also works in a locale context (both for constants and fixed variables).  Type declaration and internal syntactic representation of given constants are retrieved from the context.  Likewise, the 'no_notation' command removes given syntax annotations from the current context.

* Isar/locales: new derived specification elements 'axiomatization', 'definition', 'abbreviation', which support type-inference and admit object-level specifications (equality, equivalence).  See also the isar-ref manual.  Examples:

  axiomatization
    eq  (infix "===" 50) where
    eq_refl: "x === x" and eq_subst: "x === y ==> P x ==> P y"

  definition "f x y = x + y + 1"
  definition g where "g x = f x x"

  abbreviation
    neq  (infix "=!=" 50) where
    "x =!= y == ~ (x === y)"

These specifications may be also used in a locale context.  Then the constants being introduced depend on certain fixed parameters, and the constant name is qualified by the locale base name.  An internal abbreviation takes care of convenient input and output, making the parameters implicit and using the original short name.  See also src/HOL/ex/Abstract_NAT.thy for an example of deriving polymorphic entities from a monomorphic theory.

Presently, abbreviations are only available 'in' a target locale, but not inherited by general import expressions.  Also note that 'abbreviation' may be used as a type-safe replacement for 'syntax' + 'translations' in common applications.  The "no_abbrevs" print mode prevents folding of abbreviations in term output.

Concrete syntax is attached to specified constants in internal form, independently of name spaces.  The parse tree representation is slightly different -- use 'notation' instead of raw 'syntax', and 'translations' with explicit "CONST" markup to accommodate this.

* Pure/Isar: unified syntax for new-style specification mechanisms (e.g. 'definition', 'abbreviation', or 'inductive' in HOL) admits full type inference and dummy patterns ("_").  For example:

  definition "K x _ = x"

  inductive conj for A B
  where "A ==> B ==> conj A B"

* Pure: command 'print_abbrevs' prints all constant abbreviations of the current context.  Print mode "no_abbrevs" prevents inversion of abbreviations on output.

* Isar/locales: improved parameter handling:
  - use of locales "var" and "struct" no longer necessary;
  - parameter renamings are no longer required to be injective.  For example, this makes it possible to define endomorphisms as locale endom = homom mult mult h.

* Isar/locales: changed the way locales with predicates are defined.  Instead of accumulating the specification, the imported expression is now an interpretation.  INCOMPATIBILITY: different normal form of locale expressions.  In particular, in interpretations of locales with predicates, goals representing already interpreted fragments are not removed automatically.  Use methods `intro_locales' and `unfold_locales'; see below.

* Isar/locales: new methods `intro_locales' and `unfold_locales' provide backward reasoning on locale predicates.  The methods are aware of interpretations and discharge corresponding goals.  `intro_locales' is less aggressive than `unfold_locales' and does not unfold predicates to assumptions.  (A small sketch follows at the end of this group of entries.)

* Isar/locales: the order in which locale fragments are accumulated has changed.  This makes it possible to override declarations from fragments due to interpretations -- for example, unwanted simp rules.
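The sketch promised above: a minimal, hypothetical illustration of discharging a locale predicate with 'unfold_locales'.  The locale "semi" is invented for this purpose; add_assoc is the usual associativity law of class semigroup_add:

  locale semi =
    fixes prod :: "'a => 'a => 'a"
    assumes assoc: "prod (prod x y) z = prod x (prod y z)"

  interpretation semi ["op + :: nat => nat => nat"]
    by unfold_locales (rule add_assoc)
    (* unfold_locales reduces the goal "semi (op +)" to the plain
       associativity statement, which add_assoc discharges *)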
* Isar/locales: interpretation in theories and proof contexts has been extended.  One may now specify (and prove) equations, which are unfolded in interpreted theorems.  This is useful for replacing defined concepts (constants depending on locale parameters) by concepts already existing in the target context.  Example:

  interpretation partial_order ["op <= :: [int, int] => bool"]
    where "partial_order.less (op <=) (x::int) y = (x < y)"

Typically, the constant `partial_order.less' is created by a definition specification element in the context of locale partial_order.

* Method "induct": improved internal context management to support local fixes and defines on-the-fly.  Thus explicit meta-level connectives !! and ==> are rarely required anymore in inductive goals (using object-logic connectives for this purpose has been long obsolete anyway).  Common proof patterns are explained in src/HOL/Induct/Common_Patterns.thy, see also src/HOL/Isar_examples/Puzzle.thy and src/HOL/Lambda for realistic examples.

* Method "induct": improved handling of simultaneous goals.  Instead of introducing object-level conjunction, the statement is now split into several conclusions, while the corresponding symbolic cases are nested accordingly.  INCOMPATIBILITY, proofs need to be structured explicitly, see src/HOL/Induct/Common_Patterns.thy, for example.

* Method "induct": mutual induction rules are now specified as a list of rules sharing the same induction cases.  HOL packages usually provide foo_bar.inducts for mutually defined items foo and bar (e.g. inductive predicates/sets or datatypes).  INCOMPATIBILITY, users need to specify mutual induction rules differently, i.e. like this:

  (induct rule: foo_bar.inducts)
  (induct set: foo bar)
  (induct pred: foo bar)
  (induct type: foo bar)

The ML function ProjectRule.projections turns old-style rules into the new format.

* Method "coinduct": dual of induction, see src/HOL/Library/Coinductive_List.thy for various examples.

* Methods "cases", "induct", "coinduct": the ``(open)'' option is considered a legacy feature.

* Attribute "symmetric" produces result with standardized schematic variables (index 0).  Potential INCOMPATIBILITY.

* Simplifier: by default the simplifier trace only shows top level rewrites now.  That is, trace_simp_depth_limit is set to 1 by default.  Thus there is less danger of being flooded by the trace.  The trace indicates where parts have been suppressed.

* Provers/classical: removed obsolete classical version of elim_format attribute; classical elim/dest rules are now treated uniformly when manipulating the claset.

* Provers/classical: stricter checks to ensure that supplied intro, dest and elim rules are well-formed; dest and elim rules must have at least one premise.

* Provers/classical: attributes dest/elim/intro take an optional weight argument for the rule (just as the Pure versions).  Weights are ignored by automated tools, but determine the search order of single rule steps.

* Syntax: input syntax now supports dummy variable binding "%_. b", where the body does not mention the bound variable.  Note that dummy patterns implicitly depend on their context of bounds, which makes "{_. _}" match any set comprehension as expected.  Potential INCOMPATIBILITY -- parse translations need to cope with syntactic constant "_idtdummy" in the binding position.  (A tiny sketch follows below.)

* Syntax: removed obsolete syntactic constant "_K" and its associated parse translation.  INCOMPATIBILITY -- use dummy abstraction instead, for example "A -> B" => "Pi A (%_. B)".
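The sketch promised above, illustrating dummy bindings via plain term input; nothing here depends on a particular theory:

  term "%_. True"    (* the body never mentions the bound variable *)
  term "%x _. x"     (* dummies may be mixed with ordinary binders *)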
* Pure: 'class_deps' command visualizes the subclass relation, using the graph browser tool.

* Pure: 'print_theory' now suppresses certain internal declarations by default; use '!' option for full details.


*** HOL ***

* Method "metis" proves goals by applying the Metis general-purpose resolution prover (see also http://gilith.com/software/metis/).  Examples are in the directory MetisExamples; a small sketch follows at the end of this group of entries.  WARNING: the Isabelle/HOL-Metis integration does not yet work properly with multi-threading.

* Command 'sledgehammer' invokes external automatic theorem provers as background processes.  It generates calls to the "metis" method if successful.  These can be pasted into the proof.  Users do not have to wait for the automatic provers to return.  WARNING: does not really work with multi-threading.

* New "auto_quickcheck" feature tests outermost goal statements for potential counter-examples.  Controlled by ML references auto_quickcheck (default true) and auto_quickcheck_time_limit (default 5000 milliseconds).  Fails silently if the statement is outside of the executable fragment, or any other code generator problem occurs.

* New constant "undefined" with axiom "undefined x = undefined".

* Added class "HOL.eq", allowing for code generation with polymorphic equality.

* Some renaming of class constants due to canonical name prefixing in the new 'class' package:

  HOL.abs ~> HOL.abs_class.abs
  HOL.divide ~> HOL.divide_class.divide
  0 ~> HOL.zero_class.zero
  1 ~> HOL.one_class.one
  op + ~> HOL.plus_class.plus
  op - ~> HOL.minus_class.minus
  uminus ~> HOL.minus_class.uminus
  op * ~> HOL.times_class.times
  op < ~> HOL.ord_class.less
  op <= ~> HOL.ord_class.less_eq
  Nat.power ~> Power.power_class.power
  Nat.size ~> Nat.size_class.size
  Numeral.number_of ~> Numeral.number_class.number_of
  FixedPoint.Inf ~> Lattices.complete_lattice_class.Inf
  FixedPoint.Sup ~> Lattices.complete_lattice_class.Sup
  Orderings.min ~> Orderings.ord_class.min
  Orderings.max ~> Orderings.ord_class.max
  Divides.op div ~> Divides.div_class.div
  Divides.op mod ~> Divides.div_class.mod
  Divides.op dvd ~> Divides.div_class.dvd

INCOMPATIBILITY.  Adaptations may be required in the following cases:

a) User-defined constants using any of the names "plus", "minus", "times", "less" or "less_eq".  The standard syntax translations for "+", "-" and "*" may go wrong.  INCOMPATIBILITY: use more specific names.

b) Variables named "plus", "minus", "times", "less", "less_eq".  INCOMPATIBILITY: use more specific names.

c) Permutative equations (e.g. "a + b = b + a").  Since the change of names also changes the order of terms, permutative rewrite rules may get applied in a different order.  Experience shows that this is rarely the case (only two adaptations in the whole Isabelle distribution).  INCOMPATIBILITY: rewrite proofs.

d) ML code directly referring to constant names.  This in general only affects hand-written proof tactics, simprocs and so on.  INCOMPATIBILITY: grep your source code and replace names.  Consider using @{const_name} antiquotation.

* New class "default" with associated constant "default".

* Function "sgn" is now overloaded and available on int, real, complex (and other numeric types), using class "sgn".  Two possible defs of sgn are given as equational assumptions in the classes sgn_if and sgn_div_norm; ordered_idom now also inherits from sgn_if.  INCOMPATIBILITY.

* Locale "partial_order" now unified with class "order" (cf. theory Orderings), added parameter "less".  INCOMPATIBILITY.
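The sketch promised above: a minimal invocation of "metis" on an arbitrary propositional tautology (not taken from MetisExamples):

  lemma "A & B --> B & A"
    by metis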
* Renamings in classes "order" and "linorder": facts "refl", "trans" and "cases" to "order_refl", "order_trans" and "linorder_cases", to avoid clashes with HOL "refl" and "trans".  INCOMPATIBILITY.

* Classes "order" and "linorder": potential INCOMPATIBILITY due to changed order of proof goals in instance proofs.

* The transitivity reasoner for partial and linear orders is set up for classes "order" and "linorder".  Instances of the reasoner are available in all contexts importing or interpreting the corresponding locales.  Method "order" invokes the reasoner separately; the reasoner is also integrated with the Simplifier as a solver.  Diagnostic command 'print_orders' shows the available instances of the reasoner in the current context.

* Localized monotonicity predicate in theory "Orderings"; integrated lemmas max_of_mono and min_of_mono with this predicate.  INCOMPATIBILITY.

* Formulation of theorem "dense" changed slightly due to integration with new class dense_linear_order.

* Uniform lattice theory development in HOL.

  constants "meet" and "join" now named "inf" and "sup"
  constant "Meet" now named "Inf"

  classes "meet_semilorder" and "join_semilorder" now named "lower_semilattice" and "upper_semilattice"
  class "lorder" now named "lattice"
  class "comp_lat" now named "complete_lattice"

  Instantiation of lattice classes allows explicit definitions for "inf" and "sup" operations (or "Inf" and "Sup" for complete lattices).

INCOMPATIBILITY.  Theorem renames:

  meet_left_le ~> inf_le1
  meet_right_le ~> inf_le2
  join_left_le ~> sup_ge1
  join_right_le ~> sup_ge2
  meet_join_le ~> inf_sup_ord
  le_meetI ~> le_infI
  join_leI ~> le_supI
  le_meet ~> le_inf_iff
  le_join ~> ge_sup_conv
  meet_idempotent ~> inf_idem
  join_idempotent ~> sup_idem
  meet_comm ~> inf_commute
  join_comm ~> sup_commute
  meet_leI1 ~> le_infI1
  meet_leI2 ~> le_infI2
  le_joinI1 ~> le_supI1
  le_joinI2 ~> le_supI2
  meet_assoc ~> inf_assoc
  join_assoc ~> sup_assoc
  meet_left_comm ~> inf_left_commute
  meet_left_idempotent ~> inf_left_idem
  join_left_comm ~> sup_left_commute
  join_left_idempotent ~> sup_left_idem
  meet_aci ~> inf_aci
  join_aci ~> sup_aci
  le_def_meet ~> le_iff_inf
  le_def_join ~> le_iff_sup
  join_absorp2 ~> sup_absorb2
  join_absorp1 ~> sup_absorb1
  meet_absorp1 ~> inf_absorb1
  meet_absorp2 ~> inf_absorb2
  meet_join_absorp ~> inf_sup_absorb
  join_meet_absorp ~> sup_inf_absorb
  distrib_join_le ~> distrib_sup_le
  distrib_meet_le ~> distrib_inf_le
  add_meet_distrib_left ~> add_inf_distrib_left
  add_join_distrib_left ~> add_sup_distrib_left
  is_join_neg_meet ~> is_join_neg_inf
  is_meet_neg_join ~> is_meet_neg_sup
  add_meet_distrib_right ~> add_inf_distrib_right
  add_join_distrib_right ~> add_sup_distrib_right
  add_meet_join_distribs ~> add_sup_inf_distribs
  join_eq_neg_meet ~> sup_eq_neg_inf
  meet_eq_neg_join ~> inf_eq_neg_sup
  add_eq_meet_join ~> add_eq_inf_sup
  meet_0_imp_0 ~> inf_0_imp_0
  join_0_imp_0 ~> sup_0_imp_0
  meet_0_eq_0 ~> inf_0_eq_0
  join_0_eq_0 ~> sup_0_eq_0
  neg_meet_eq_join ~> neg_inf_eq_sup
  neg_join_eq_meet ~> neg_sup_eq_inf
  join_eq_if ~> sup_eq_if
  mono_meet ~> mono_inf
  mono_join ~> mono_sup
  meet_bool_eq ~> inf_bool_eq
  join_bool_eq ~> sup_bool_eq
  meet_fun_eq ~> inf_fun_eq
  join_fun_eq ~> sup_fun_eq
  meet_set_eq ~> inf_set_eq
  join_set_eq ~> sup_set_eq
  meet1_iff ~> inf1_iff
  meet2_iff ~> inf2_iff
  meet1I ~> inf1I
  meet2I ~> inf2I
  meet1D1 ~> inf1D1
  meet2D1 ~> inf2D1
  meet1D2 ~> inf1D2
  meet2D2 ~> inf2D2
  meet1E ~> inf1E
  meet2E ~> inf2E
  join1_iff ~> sup1_iff
  join2_iff ~> sup2_iff
  join1I1 ~> sup1I1
  join2I1 ~> sup2I1
  join1I2 ~> sup1I2
  join2I2 ~> sup2I2
  join1CI ~> sup1CI
  join2CI ~> sup2CI
  join1E ~> sup1E
  join2E ~> sup2E
  is_meet_Meet ~> is_meet_Inf
  Meet_bool_def ~> Inf_bool_def
  Meet_fun_def ~> Inf_fun_def
  Meet_greatest ~> Inf_greatest
  Meet_lower ~> Inf_lower
  Meet_set_def ~> Inf_set_def
  Sup_def ~> Sup_Inf
  Sup_bool_eq ~> Sup_bool_def
  Sup_fun_eq ~> Sup_fun_def
  Sup_set_eq ~> Sup_set_def
  listsp_meetI ~> listsp_infI
  listsp_meet_eq ~> listsp_inf_eq
  meet_min ~> inf_min
  join_max ~> sup_max

* Added syntactic class "size"; overloaded constant "size" now has type "'a::size => nat".

* Internal reorganisation of `size' of datatypes: size theorems "foo.size" are no longer subsumed by "foo.simps" (but are still simplification rules by default!); theorems "prod.size" now named "*.size".

* Class "div" now inherits from class "times" rather than "type".  INCOMPATIBILITY.

* HOL/Finite_Set: "name-space" locales Lattice, Distrib_lattice, Linorder etc. have disappeared; operations defined in terms of fold_set now are named Inf_fin, Sup_fin.  INCOMPATIBILITY.

* HOL/Nat: neq0_conv no longer declared as iff.  INCOMPATIBILITY.

* HOL-Word: New extensive library and type for generic, fixed size machine words, with arithmetic, bit-wise, shifting and rotating operations, reflection into int, nat, and bool lists, automation for linear arithmetic (by automatic reflection into nat or int), including lemmas on overflow and monotonicity.  Instantiated to all appropriate arithmetic type classes, supporting automatic simplification of numerals on all operations.

* Library/Boolean_Algebra: locales for abstract boolean algebras.

* Library/Numeral_Type: numbers as types, e.g. TYPE(32).

* Code generator library theories:
  - Code_Integer represents HOL integers by big integer literals in target languages.
  - Code_Char represents HOL characters by character literals in target languages.
  - Code_Char_chr like Code_Char, but also offers treatment of character codes; includes Code_Integer.
  - Executable_Set makes it possible to generate code for finite sets using lists.
  - Executable_Rat implements rational numbers as triples (sign, numerator, denominator).
  - Executable_Real implements a subset of real numbers, namely those representable by rational numbers.
  - Efficient_Nat implements natural numbers by integers, which in general will result in higher efficiency; pattern matching with 0/Suc is eliminated; includes Code_Integer.
  - Code_Index provides an additional datatype index which is mapped to target-language built-in integers.
  - Code_Message provides an additional datatype message_string which is isomorphic to strings; messages are mapped to target-language strings.

* New package for inductive predicates.  An n-ary predicate p with m parameters z_1, ..., z_m can now be defined via

  inductive
    p :: "U_1 => ... => U_m => T_1 => ... => T_n => bool"
    for z_1 :: U_1 and ... and z_m :: U_m
  where
    rule_1: "... ==> p z_1 ... z_m t_1_1 ... t_1_n"
  | ...

with full support for type-inference, rather than

  consts s :: "U_1 => ... => U_m => (T_1 * ... * T_n) set"

  abbreviation p :: "U_1 => ... => U_m => T_1 => ... => T_n => bool"
  where "p z_1 ... z_m x_1 ... x_n == (x_1, ..., x_n) : s z_1 ... z_m"

  inductive "s z_1 ... z_m"
  intros
    rule_1: "... ==> (t_1_1, ..., t_1_n) : s z_1 ... z_m"
  ...

For backward compatibility, there is a wrapper allowing inductive sets to be defined with the new package via

  inductive_set
    s :: "U_1 => ... => U_m => (T_1 * ... * T_n) set"
    for z_1 :: U_1 and ... and z_m :: U_m
  where
    rule_1: "... ==> (t_1_1, ..., t_1_n) : s z_1 ... z_m"
  | ...

or

  inductive_set
    s :: "U_1 => ... => U_m => (T_1 * ... * T_n) set"
    and p :: "U_1 => ... => U_m => T_1 => ... => T_n => bool"
    for z_1 :: U_1 and ... and z_m :: U_m
  where
    "p z_1 ... z_m x_1 ... x_n == (x_1, ..., x_n) : s z_1 ... z_m"
  | rule_1: "... ==> p z_1 ... z_m t_1_1 ... t_1_n"
  | ...

if the additional syntax "p ..." is required.

Numerous examples can be found in the subdirectories src/HOL/Auth, src/HOL/Bali, src/HOL/Induct, and src/HOL/MicroJava.

INCOMPATIBILITIES:

- Since declaration and definition of inductive sets or predicates is no longer separated, abbreviations involving the newly introduced sets or predicates must be specified together with the introduction rules after the 'where' keyword (see above), rather than before the actual inductive definition.

- The variables in induction and elimination rules are now quantified in the order of their occurrence in the introduction rules, rather than in alphabetical order.  Since this may break some proofs, these proofs either have to be repaired, e.g. by reordering the variables a_i_1 ... a_i_{k_i} in Isar 'case' statements of the form

    case (rule_i a_i_1 ... a_i_{k_i})

  or the old order of quantification has to be restored by explicitly adding meta-level quantifiers in the introduction rules, i.e.

    | rule_i: "!!a_i_1 ... a_i_{k_i}. ... ==> p z_1 ... z_m t_i_1 ... t_i_n"

- The format of the elimination rules is now

    p z_1 ... z_m x_1 ... x_n ==>
      (!!a_1_1 ... a_1_{k_1}. x_1 = t_1_1 ==> ... ==> x_n = t_1_n ==> ... ==> P)
        ==> ... ==> P

  for predicates and

    (x_1, ..., x_n) : s z_1 ... z_m ==>
      (!!a_1_1 ... a_1_{k_1}. x_1 = t_1_1 ==> ... ==> x_n = t_1_n ==> ... ==> P)
        ==> ... ==> P

  for sets rather than

    x : s z_1 ... z_m ==>
      (!!a_1_1 ... a_1_{k_1}. x = (t_1_1, ..., t_1_n) ==> ... ==> P)
        ==> ... ==> P

  This may require terms in goals to be expanded to n-tuples (e.g. using case_tac or simplification with the split_paired_all rule) before the above elimination rule is applicable.

- The elimination or case analysis rules for (mutually) inductive sets or predicates are now called "p_1.cases" ... "p_k.cases".  The list of rules "p_1_..._p_k.elims" is no longer available.

* New package "function"/"fun" for general recursive functions, supporting mutual and nested recursion, definitions in local contexts, more general pattern matching and partiality.  See HOL/ex/Fundefs.thy for small examples, and the separate tutorial on the function package.  The old recdef "package" is still available as before, but users are encouraged to use the new package.

* Method "lexicographic_order" automatically synthesizes termination relations as lexicographic combinations of size measures.

* Case-expressions allow arbitrary constructor-patterns (including "_") and take their order into account, like in functional programming.  Internally, this is translated into nested case-expressions; missing cases are added and mapped to the predefined constant "undefined".  In complicated cases printing may no longer show the original input but the internal form.  Lambda-abstractions allow the same form of pattern matching: "% pat1 => e1 | ..." is an abbreviation for "%x. case x of pat1 => e1 | ..." where x is a new variable.

* IntDef: The constant "int :: nat => int" has been removed; now "int" is an abbreviation for "of_nat :: nat => int".  The simplification rules for "of_nat" have been changed to work like "int" did previously.  Potential INCOMPATIBILITY:
  - "of_nat (Suc m)" simplifies to "1 + of_nat m" instead of "of_nat m + 1"
  - of_nat_diff and of_nat_mult are no longer default simp rules

* Method "algebra" solves polynomial equations over (semi)rings using Groebner bases.
The (semi)ring structure is defined by locales and the tool setup depends on that generic context.  Installing the method for a specific type involves instantiating the locale and possibly adding declarations for computation on the coefficients.  The method is already instantiated for natural numbers and for the axiomatic class of idoms with numerals.  See also the paper by Chaieb and Wenzel at CALCULEMUS 2007 for the general principles underlying this architecture of context-aware proof-tools.

* Method "ferrack" implements quantifier elimination over special-purpose dense linear orders using locales (analogous to "algebra").  The method is already installed for class {ordered_field,recpower,number_ring} which subsumes real, hyperreal, rat, etc.

* Former constant "List.op @" now named "List.append".  Use ML antiquotations @{const_name List.append} or @{term " ... @ ... "} to circumvent possible incompatibilities when working on ML level.

* primrec: missing cases mapped to "undefined" instead of "arbitrary".

* New function listsum :: 'a list => 'a for arbitrary monoids.  Special syntax: "SUM x <- xs. f x" (and LaTeX variants).

* New syntax for Haskell-like list comprehension (input only), e.g. [(x,y). x <- xs, y <- ys, x ~= y], see also src/HOL/List.thy.

* The special syntax for function "filter" has changed from [x : xs. P] to [x <- xs. P] to avoid an ambiguity caused by list comprehension syntax, and for uniformity.  INCOMPATIBILITY.

* [a..b] is now defined for arbitrary linear orders.  It used to be defined on nat only, as an abbreviation for [a..<Suc b].

* New syntax "A <-> B" for equality on bool (with priority 25 like -->); output depends on the "iff" print_mode, the default is "A = B" (with priority 50).

* Relations less (<) and less_eq (<=) are also available on type bool.  Modified syntax to disallow nesting without explicit parentheses, e.g. "(x < y) < z" or "x < (y < z)", but NOT "x < y < z".  Potential INCOMPATIBILITY.

* "LEAST x:A. P" expands to "LEAST x. x:A & P" (input only).

* Relation composition operator "op O" now has precedence 75 and binds stronger than union and intersection.  INCOMPATIBILITY.

* The old set interval syntax "{m..n(}" (and relatives) has been removed.  Use "{m..<n}" (and relatives) instead.

* Method "sat": improved -- goals of the form "<hyps> ==> False" are accepted, equivalences (i.e. "=" on type bool) are handled, variable names of the form "lit_<n>" are no longer reserved, significant speedup.

* Methods "sat" and "satx" can now replay MiniSat proof traces.  zChaff is still supported as well.

* 'inductive' and 'datatype': provide projections of mutual rules, bundled as foo_bar.inducts.

* Library: moved theories Parity, GCD, Binomial, Infinite_Set to Library.

* Library: moved theory Accessible_Part to main HOL.

* Library: added theory Coinductive_List of potentially infinite lists as greatest fixed-point.

* Library: added theory AssocList which implements (finite) maps as association lists.

* Method "evaluation" solves goals (i.e. a boolean expression) efficiently by compiling it to ML.  The goal is "proved" (via an oracle) if it evaluates to True.

* Linear arithmetic now splits certain operators (e.g. min, max, abs) also when invoked by the simplifier.  This results in the Simplifier being more powerful on arithmetic goals.  INCOMPATIBILITY.  Configuration option fast_arith_split_limit=0 recovers the old behavior.

* Support for hex (0x20) and binary (0b1001) numerals.

* New method: reify eqs (t), where eqs are equations for an interpretation I :: 'a list => 'b => 'c and t::'c is an optional parameter, computes a term s::'b and a list xs::'a list and proves the theorem I xs s = t.
This is also known as reification or quoting.  The resulting theorem is applied to the subgoal to substitute t with I xs s.  If t is omitted, the subgoal itself is reified.

* New method: reflection corr_thm eqs (t).  The parameters eqs and (t) are as explained above.  corr_thm is a theorem of the form I vs (f t) = I vs t, where f is supposed to be a computable function (in the sense of code generation).  The method uses reify to compute s and xs as above, then applies corr_thm and uses normalization by evaluation to "prove" f s = r and finally gets the theorem t = r, which is again applied to the subgoal.  An example is available in src/HOL/ex/ReflectionEx.thy.

* Reflection: Automatic reification now handles binding; an example is available in src/HOL/ex/ReflectionEx.thy.

* HOL-Statespace: ``State Spaces: The Locale Way'' introduces a command 'statespace' that is similar to 'record', but introduces an abstract specification based on the locale infrastructure instead of HOL types.  This leads to extra flexibility in composing state spaces, in particular multiple inheritance and renaming of components.


*** HOL-Complex ***

* Hyperreal: Functions root and sqrt are now defined on negative real inputs so that root n (- x) = - root n x and sqrt (- x) = - sqrt x.  Nonnegativity side conditions have been removed from many lemmas, so that more subgoals may now be solved by simplification; potential INCOMPATIBILITY.

* Real: new type classes formalize real normed vector spaces and algebras, using new overloaded constants scaleR :: real => 'a => 'a and norm :: 'a => real.

* Real: constant of_real :: real => 'a::real_algebra_1 injects from reals into other types.  The overloaded constant Reals :: 'a set is now defined as range of_real; potential INCOMPATIBILITY.

* Real: proper support for ML code generation, including 'quickcheck'.  Reals are implemented as arbitrary precision rationals.

* Hyperreal: Several constants that previously worked only for the reals have been generalized, so they now work over arbitrary vector spaces.  Type annotations may need to be added in some cases; potential INCOMPATIBILITY.

  Infinitesimal  :: ('a::real_normed_vector) star set
  HFinite        :: ('a::real_normed_vector) star set
  HInfinite      :: ('a::real_normed_vector) star set
  approx         :: ('a::real_normed_vector) star => 'a star => bool
  monad          :: ('a::real_normed_vector) star => 'a star set
  galaxy         :: ('a::real_normed_vector) star => 'a star set
  (NS)LIMSEQ     :: [nat => 'a::real_normed_vector, 'a] => bool
  (NS)convergent :: (nat => 'a::real_normed_vector) => bool
  (NS)Bseq       :: (nat => 'a::real_normed_vector) => bool
  (NS)Cauchy     :: (nat => 'a::real_normed_vector) => bool
  (NS)LIM        :: ['a::real_normed_vector => 'b::real_normed_vector, 'a, 'b] => bool
  is(NS)Cont     :: ['a::real_normed_vector => 'b::real_normed_vector, 'a] => bool
  deriv          :: ['a::real_normed_field => 'a, 'a, 'a] => bool
  sgn            :: 'a::real_normed_vector => 'a
  exp            :: 'a::{recpower,real_normed_field,banach} => 'a

* Complex: Some complex-specific constants are now abbreviations for overloaded ones: complex_of_real = of_real, cmod = norm, hcmod = hnorm.  Other constants have been entirely removed in favor of the polymorphic versions (INCOMPATIBILITY):

  approx        <-- capprox
  HFinite       <-- CFinite
  HInfinite     <-- CInfinite
  Infinitesimal <-- CInfinitesimal
  monad         <-- cmonad
  galaxy        <-- cgalaxy
  (NS)LIM       <-- (NS)CLIM, (NS)CRLIM
  is(NS)Cont    <-- is(NS)Contc, is(NS)contCR
  (ns)deriv     <-- (ns)cderiv


*** HOL-Algebra ***

* Formalisation of ideals and the quotient construction over rings.
* Order and lattice theory no longer based on records.  INCOMPATIBILITY.

* Renamed lemmas least_carrier -> least_closed and greatest_carrier -> greatest_closed.  INCOMPATIBILITY.

* Method algebra is now set up via an attribute.  For examples see Ring.thy.  INCOMPATIBILITY: the method is now weaker on combinations of algebraic structures.

* Renamed theory CRing to Ring.


*** HOL-Nominal ***

* Substantial, yet incomplete support for nominal datatypes (binding structures) based on HOL-Nominal logic.  See src/HOL/Nominal and src/HOL/Nominal/Examples.  Prospective users should consult http://isabelle.in.tum.de/nominal/


*** ML ***

* ML basics: just one true type int, which coincides with IntInf.int (even on SML/NJ).

* ML within Isar: antiquotations make it possible to embed statically-checked formal entities in the source, referring to the context available at compile-time.  For example:

  ML {* @{sort "{zero,one}"} *}
  ML {* @{typ "'a => 'b"} *}
  ML {* @{term "%x. x"} *}
  ML {* @{prop "x == y"} *}
  ML {* @{ctyp "'a => 'b"} *}
  ML {* @{cterm "%x. x"} *}
  ML {* @{cprop "x == y"} *}
  ML {* @{thm asm_rl} *}
  ML {* @{thms asm_rl} *}
  ML {* @{type_name c} *}
  ML {* @{type_syntax c} *}
  ML {* @{const_name c} *}
  ML {* @{const_syntax c} *}
  ML {* @{context} *}
  ML {* @{theory} *}
  ML {* @{theory Pure} *}
  ML {* @{theory_ref} *}
  ML {* @{theory_ref Pure} *}
  ML {* @{simpset} *}
  ML {* @{claset} *}
  ML {* @{clasimpset} *}

The same works for sources being ``used'' within an Isar context.

* ML in Isar: improved error reporting; extra verbosity with ML_Context.trace enabled.

* Pure/General/table.ML: the join operations now work via exceptions DUP/SAME instead of type option.  This is simpler in simple cases, and admits slightly more efficient complex applications.

* Pure: 'advanced' translation functions (parse_translation etc.) now use Context.generic instead of just theory.

* Pure: datatype Context.generic joins theory/Proof.context and provides some facilities for code that works in either kind of context, notably GenericDataFun for uniform theory and proof data.

* Pure: simplified internal attribute type, which is now always Context.generic * thm -> Context.generic * thm.  Global (theory) vs. local (Proof.context) attributes have been discontinued, while minimizing code duplication.  Thm.rule_attribute and Thm.declaration_attribute build canonical attributes; see also structure Context for further operations on Context.generic, notably GenericDataFun.  INCOMPATIBILITY, need to adapt attribute type declarations and definitions.

* Context data interfaces (Theory/Proof/GenericDataFun): removed name/print, uninitialized data defaults to ad-hoc copy of empty value, init only required for impure data.  INCOMPATIBILITY: empty really needs to be empty (no dependencies on theory content!)

* Pure/kernel: consts certification ignores sort constraints given in signature declarations.  (This information is not relevant to the logic, but only for type inference.)  SIGNIFICANT INTERNAL CHANGE, potential INCOMPATIBILITY.

* Pure: axiomatic type classes are now purely definitional, with explicit proofs of class axioms and super class relations performed internally.  See Pure/axclass.ML for the main internal interfaces -- notably AxClass.define_class supersedes AxClass.add_axclass, and AxClass.axiomatize_class/classrel/arity supersede Sign.add_classes/classrel/arities.

* Pure/Isar: Args/Attrib parsers operate on Context.generic -- global/local versions on theory vs. Proof.context have been discontinued; Attrib.syntax and Method.syntax have been adapted accordingly.
INCOMPATIBILITY, need to adapt parser expressions for attributes, methods, etc. * Pure: several functions of signature "... -> theory -> theory * ..." have been reoriented to "... -> theory -> ... * theory" in order to allow natural usage in combination with the ||>, ||>>, |-> and fold_map combinators. * Pure: official theorem names (closed derivations) and additional comments (tags) are now strictly separate. Name hints -- which are maintained as tags -- may be attached any time without affecting the derivation. * Pure: primitive rule lift_rule now takes goal cterm instead of an actual goal state (thm). Use Thm.lift_rule (Thm.cprem_of st i) to achieve the old behaviour. * Pure: the "Goal" constant is now called "prop", supporting a slightly more general idea of ``protecting'' meta-level rule statements. * Pure: Logic.(un)varify only works in a global context, which is now enforced instead of silently assumed. INCOMPATIBILITY, may use Logic.legacy_(un)varify as temporary workaround. * Pure: structure Name provides scalable operations for generating internal variable names, notably Name.variants etc. This replaces some popular functions from term.ML: Term.variant -> Name.variant Term.variantlist -> Name.variant_list Term.invent_names -> Name.invent_list Note that low-level renaming rarely occurs in new code -- operations from structure Variable are used instead (see below). * Pure: structure Variable provides fundamental operations for proper treatment of fixed/schematic variables in a context. For example, Variable.import introduces fixes for schematics of given facts and Variable.export reverses the effect (up to renaming) -- this replaces various freeze_thaw operations. * Pure: structure Goal provides simple interfaces for init/conclude/finish and tactical prove operations (replacing former Tactic.prove). Goal.prove is the canonical way to prove results within a given context; Goal.prove_global is a degraded version for theory level goals, including a global Drule.standard. Note that OldGoals.prove_goalw_cterm has long been obsolete, since it is ill-behaved in a local proof context (e.g. with local fixes/assumes or in a locale context). * Pure/Syntax: generic interfaces for parsing (Syntax.parse_term etc.) and type checking (Syntax.check_term etc.), with common combinations (Syntax.read_term etc.). These supersede former Sign.read_term etc. which are considered legacy and await removal. * Pure/Syntax: generic interfaces for type unchecking (Syntax.uncheck_terms etc.) and unparsing (Syntax.unparse_term etc.), with common combinations (Syntax.pretty_term, Syntax.string_of_term etc.). Former Sign.pretty_term, Sign.string_of_term etc. are still available for convenience, but refer to the very same operations using a mere theory instead of a full context. * Isar: simplified treatment of user-level errors, using exception ERROR of string uniformly. Function error now merely raises ERROR, without any side effect on output channels. The Isar toplevel takes care of proper display of ERROR exceptions. ML code may use plain handle/can/try; cat_error may be used to concatenate errors like this: ... handle ERROR msg => cat_error msg "..." Toplevel ML code (run directly or through the Isar toplevel) may be embedded into the Isar toplevel with exception display/debug like this: Isar.toplevel (fn () => ...) 
INCOMPATIBILITY, removed special transform_error facilities, removed obsolete variants of user-level exceptions (ERROR_MESSAGE, Context.PROOF, ProofContext.CONTEXT, Proof.STATE, ProofHistory.FAIL) -- use plain ERROR instead.

* Isar: theory setup now has type (theory -> theory), instead of a list.  INCOMPATIBILITY, may use #> to compose setup functions.

* Isar: ML toplevel pretty printer for type Proof.context, subject to ProofContext.debug/verbose flags.

* Isar: Toplevel.theory_to_proof admits transactions that modify the theory before entering a proof state.  Transactions now always see a quasi-functional intermediate checkpoint, both in interactive and batch mode.

* Isar: simplified interfaces for outer syntax.  Renamed OuterSyntax.add_keywords to OuterSyntax.keywords.  Removed OuterSyntax.add_parsers -- this functionality is now included in OuterSyntax.command etc.  INCOMPATIBILITY.

* Simplifier: the simpset of a running simplification process now contains a proof context (cf. Simplifier.the_context), which is the very context that the initial simpset has been retrieved from (by simpset_of/local_simpset_of).  Consequently, all plug-in components (solver, looper etc.) may depend on arbitrary proof data.

* Simplifier.inherit_context inherits the proof context (plus the local bounds) of the current simplification process; any simproc etc. that calls the Simplifier recursively should do this!  Removed former Simplifier.inherit_bounds, which is already included here -- INCOMPATIBILITY.  Tools based on low-level rewriting may even have to specify an explicit context using Simplifier.context/theory_context.

* Simplifier/Classical Reasoner: more abstract interfaces change_simpset/claset for modifying the simpset/claset reference of a theory; raw versions simpset/claset_ref etc. have been discontinued -- INCOMPATIBILITY.

* Provers: more generic wrt. syntax of object-logics, avoid hardwired "Trueprop" etc.


*** System ***

* settings: the default heap location within ISABELLE_HOME_USER now includes ISABELLE_IDENTIFIER.  This simplifies use of multiple Isabelle installations.

* isabelle-process: option -S (secure mode) disables some critical operations, notably runtime compilation and evaluation of ML source code.

* Basic Isabelle mode for jEdit, see Isabelle/lib/jedit/.

* Support for parallel execution, using native multicore support of Poly/ML 5.1.  The theory loader exploits parallelism when processing independent theories, according to the given theory header specifications.  The maximum number of worker threads is specified via usedir option -M or the "max-threads" setting in Proof General.  A speedup factor of 1.5--3.5 can be expected on a 4-core machine, and up to 6 on an 8-core machine.  User code needs to observe certain guidelines for thread-safe programming, see appendix A in the Isar Implementation manual.



New in Isabelle2005 (October 2005)
----------------------------------

*** General ***

* Theory headers: the new header syntax for Isar theories is

    theory <name>
    imports <theory1> ... <theoryN>
    uses <file1> ... <fileM>
    begin

  where the 'uses' part is optional.  The previous syntax

    theory <name> = <theory1> + ... + <theoryN>:

  will disappear in the next release.  Use isatool fixheaders to convert existing theory files.  Note that there is no change in ancient non-Isar theories now, but these will disappear soon.

* Theory loader: parent theories can now also be referred to via relative and absolute paths.

* Command 'find_theorems' searches for a list of criteria instead of a list of constants.  Known criteria are: intro, elim, dest, name:string, simp:term, and any term.
Criteria can be preceded by '-' to select theorems that do not match.  Intro, elim, dest select theorems that match the current goal, name:s selects theorems whose fully qualified name contains s, and simp:term selects all simplification rules whose lhs matches term.  Any other term is interpreted as pattern and selects all theorems matching the pattern.

Available in ProofGeneral under 'ProofGeneral -> Find Theorems' or C-c C-f.  Example:

  C-c C-f (100) "(_::nat) + _ + _" intro -name: "HOL."

prints the last 100 theorems matching the pattern "(_::nat) + _ + _", matching the current goal as introduction rule and not having "HOL." in their name (i.e. not being defined in theory HOL).

* Command 'thms_containing' has been discontinued in favour of 'find_theorems'; INCOMPATIBILITY.

* Communication with Proof General is now 8bit clean, which means that Unicode text in UTF-8 encoding may be used within theory texts (both formal and informal parts).  Cf. option -U of the Isabelle Proof General interface.  Here are some simple examples (cf. src/HOL/ex):

  http://isabelle.in.tum.de/library/HOL/ex/Hebrew.html
  http://isabelle.in.tum.de/library/HOL/ex/Chinese.html

* Improved efficiency of the Simplifier and, to a lesser degree, the Classical Reasoner.  Typical big applications run around 2 times faster.


*** Document preparation ***

* Commands 'display_drafts' and 'print_drafts' perform simple output of raw sources.  Only those symbols that do not require additional LaTeX packages (depending on comments in isabellesym.sty) are displayed properly, everything else is left verbatim.  isatool display and isatool print are used as front ends (these are subject to the DVI/PDF_VIEWER and PRINT_COMMAND settings, respectively).

* Command tags control specific markup of certain regions of text, notably folding and hiding.  Predefined tags include "theory" (for theory begin and end), "proof" for proof commands, and "ML" for commands involving ML code; the additional tags "visible" and "invisible" are unused by default.  Users may give explicit tag specifications in the text, e.g. ''by %invisible (auto)''.  The interpretation of tags is determined by the LaTeX job during document preparation: see option -V of isatool usedir, or options -n and -t of isatool document, or even the LaTeX macros \isakeeptag, \isafoldtag, \isadroptag.

Several document versions may be produced at the same time via isatool usedir (the generated index.html will link all of them).  Typical specifications include ''-V document=theory,proof,ML'' to present theory/proof/ML parts faithfully, ''-V outline=/proof,/ML'' to fold proof and ML commands, and ''-V mutilated=-theory,-proof,-ML'' to omit these parts without any formal replacement text.  The Isabelle site default settings produce ''document'' and ''outline'' versions as specified above.

* Several new antiquotations:

  @{term_type term} prints a term with its type annotated;

  @{typeof term} prints the type of a term;

  @{const const} is the same as @{term const}, but checks that the argument is a known logical constant;

  @{term_style style term} and @{thm_style style thm} print a term or theorem applying a "style" to it;

  @{ML text}

Predefined styles are 'lhs' and 'rhs' printing the lhs/rhs of definitions, equations, inequations etc., 'concl' printing only the conclusion of a meta-logical statement theorem, and 'prem1' .. 'prem19' to print the specified premise.  TermStyle.add_style provides an ML interface for introducing further styles.  See also the "LaTeX Sugar" document for practical applications.
The ML antiquotation prints type-checked ML expressions verbatim.

* Markup commands 'chapter', 'section', 'subsection', 'subsubsection', and 'text' support optional locale specification '(in loc)', which specifies the default context for interpreting antiquotations.  For example: 'text (in lattice) {* @{thm inf_assoc}*}'.

* Option 'locale=NAME' of antiquotations specifies an alternative context interpreting the subsequent argument.  For example: @{thm [locale=lattice] inf_assoc}.

* Proper output of proof terms (@{prf ...} and @{full_prf ...}) within a proof context.

* Proper output of antiquotations for theory commands involving a proof context (such as 'locale' or 'theorem (in loc) ...').

* Delimiters of outer tokens (string etc.) now produce separate LaTeX macros (\isachardoublequoteopen, \isachardoublequoteclose etc.).

* isatool usedir: new option -C (default true) controls whether option -D should include a copy of the original document directory; -C false prevents unwanted effects such as copying of administrative CVS data.


*** Pure ***

* Considerably improved version of 'constdefs' command.  Now performs automatic type-inference of declared constants; additional support for local structure declarations (cf. locales and HOL records), see also isar-ref manual.  Potential INCOMPATIBILITY: need to observe strictly sequential dependencies of definitions within a single 'constdefs' section; moreover, the declared name needs to be an identifier.  If all fails, consider to fall back on 'consts' and 'defs' separately.  (A small sketch follows at the end of this group of entries.)

* Improved indexed syntax and implicit structures.  First of all, indexed syntax provides a notational device for subscripted application, using the new syntax \<^bsub>term\<^esub> for arbitrary expressions.  Secondly, in a local context with structure declarations, number indexes \<^sub>n or the empty index (default number 1) refer to a certain fixed variable implicitly; option show_structs controls printing of implicit structures.  Typical applications of these concepts involve record types and locales.

* New command 'no_syntax' removes grammar declarations (and translations) resulting from the given syntax specification, which is interpreted in the same manner as for the 'syntax' command.

* 'Advanced' translation functions (parse_translation etc.) may depend on the signature of the theory context being presently used for parsing/printing, see also isar-ref manual.

* Improved 'oracle' command provides a type-safe interface to turn an ML expression of type theory -> T -> term into a primitive rule of type theory -> T -> thm (i.e. the functionality of Thm.invoke_oracle is already included here); see also FOL/ex/IffExample.thy; INCOMPATIBILITY.

* axclass: name space prefix for class "c" is now "c_class" (was "c" before); "cI" is no longer bound, use "c.intro" instead.  INCOMPATIBILITY.  This change avoids clashes of fact bindings for axclasses vs. locales.

* Improved internal renaming of symbolic identifiers -- attach primes instead of base 26 numbers.

* New flag show_question_marks controls printing of leading question marks in schematic variable names.

* In schematic variable names, *any* symbol following \<^isub> or \<^isup> is now treated as part of the base name.  For example, the following works without printing of awkward ".0" indexes:

  lemma "x\<^isub>1 = x\<^isub>2 ==> x\<^isub>2 = x\<^isub>1" by simp

* Inner syntax includes (*(*nested*) comments*).

* Pretty printer now supports unbreakable blocks, specified in mixfix annotations as "(00...)".
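The 'constdefs' sketch promised above; it is a minimal, hypothetical illustration, and the constants "double" and "quadruple" are not part of any library:

  constdefs
    double :: "nat => nat"
    "double n == n + n"

    quadruple :: "nat => nat"
    "quadruple n == double (double n)"
    (* entries may only refer to items declared earlier in the same
       section, reflecting the strictly sequential dependencies *)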
* Clear separation of logical types and nonterminals, where the latter may only occur in 'syntax' specifications or type abbreviations.  Before that distinction was only partially implemented via type class "logic" vs. "{}".  Potential INCOMPATIBILITY in rare cases of improper use of 'types'/'consts' instead of 'nonterminals'/'syntax'.  Some very exotic syntax specifications may require further adaptation (e.g. Cube/Cube.thy).

* Removed obsolete type class "logic", use the top sort {} instead.  Note that non-logical types should be declared as 'nonterminals' rather than 'types'.  INCOMPATIBILITY for new object-logic specifications.

* Attributes 'induct' and 'cases': type or set names may now be locally fixed variables as well.

* Simplifier: can now control the depth to which conditional rewriting is traced via the PG menu Isabelle -> Settings -> Trace Simp Depth Limit.

* Simplifier: simplification procedures may now take the current simpset into account (cf. Simplifier.simproc(_i) / mk_simproc interface), which is very useful for calling the Simplifier recursively.  Minor INCOMPATIBILITY: the 'prems' argument of simprocs is gone -- use prems_of_ss on the simpset instead.  Moreover, the low-level mk_simproc no longer applies Logic.varify internally, to allow for use in a context of fixed variables.

* thin_tac now works even if the assumption being deleted contains !! or ==>.  More generally, erule now works even if the major premise of the elimination rule contains !! or ==>.

* Method 'rules' has been renamed to 'iprover'.  INCOMPATIBILITY.

* Reorganized bootstrapping of the Pure theories; CPure is now derived from Pure, which contains all common declarations already.  Both theories are defined via plain Isabelle/Isar .thy files.  INCOMPATIBILITY: elements of CPure (such as the CPure.intro / CPure.elim / CPure.dest attributes) now appear in the Pure name space; use isatool fixcpure to adapt your theory and ML sources.

* New syntax 'name(i-j, i-, i, ...)' for referring to specific selections of theorems in named facts via index ranges.

* 'print_theorems': in theory mode, really print the difference wrt. the last state (works for interactive theory development only), in proof mode print all local facts (cf. 'print_facts');

* 'hide': option '(open)' hides only base names.

* More efficient treatment of intermediate checkpoints in interactive theory development.

* Code generator is now invoked via code_module (incremental code generation) and code_library (modular code generation, ML structures for each theory).  INCOMPATIBILITY: new keywords 'file' and 'contains' must be quoted when used as identifiers.

* New 'value' command for reading, evaluating and printing terms using the code generator.  INCOMPATIBILITY: command keyword 'value' must be quoted when used as identifier.


*** Locales ***

* New commands for the interpretation of locale expressions in theories (1), locales (2) and proof contexts (3).  These generate proof obligations from the expression specification.  After the obligations have been discharged, theorems of the expression are added to the theory, target locale or proof context.  The synopsis of the commands is as follows:

  (1) interpretation expr inst
  (2) interpretation target < expr
  (3) interpret expr inst

Interpretation in theories and proof contexts requires a parameter instantiation of terms from the current context.  This is applied to specifications and theorems of the interpreted expression.  Interpretation in locales only permits parameter renaming through the locale expression.
Interpretation is smart in that interpretations that are active already do not occur in proof obligations, neither are instantiated theorems stored in duplicate.  Use 'print_interps' to inspect active interpretations of a particular locale.  For details, see the Isar Reference manual.  Examples can be found in HOL/Finite_Set.thy and HOL/Algebra/UnivPoly.thy.

INCOMPATIBILITY: former 'instantiate' has been withdrawn, use 'interpret' instead.

* New context element 'constrains' for adding type constraints to parameters.

* Context expressions: renaming of parameters with syntax redeclaration.

* Locale declaration: 'includes' disallowed.

* Proper static binding of attribute syntax -- i.e. types / terms / facts mentioned as arguments are always those of the locale definition context, independently of the context of later invocations.  Moreover, locale operations (renaming and type / term instantiation) are applied to attribute arguments as expected.

INCOMPATIBILITY of the ML interface: always pass Attrib.src instead of actual attributes; rare situations may require Attrib.attribute to embed those attributes into Attrib.src that lack concrete syntax.  Attribute implementations need to cooperate properly with the static binding mechanism.  Basic parsers Args.XXX_typ/term/prop and Attrib.XXX_thm etc. already do the right thing without further intervention.  Only unusual applications -- such as "where" or "of" (cf. src/Pure/Isar/attrib.ML), which process arguments depending both on the context and the facts involved -- may have to assign parsed values to argument tokens explicitly.

* Changed parameter management in theorem generation for long goal statements with 'includes'.  INCOMPATIBILITY: produces a different theorem statement in rare situations.

* Locale inspection command 'print_locale' omits notes elements.  Use 'print_locale!' to have them included in the output.


*** Provers ***

* Provers/hypsubst.ML: improved version of the subst method, for single-step rewriting: it now works in bound variable contexts.  New is 'subst (asm)', for rewriting an assumption.  INCOMPATIBILITY: may rewrite a different subterm than the original subst method, which is still available as 'simplesubst'.

* Provers/quasi.ML: new transitivity reasoners for transitivity only and quasi orders.

* Provers/trancl.ML: new transitivity reasoner for transitive and reflexive-transitive closure of relations.

* Provers/blast.ML: new reference depth_limit to make blast's depth limit (previously hard-coded with a value of 20) user-definable.

* Provers/simplifier.ML has been moved to Pure, where Simplifier.setup is performed already.  Object-logics merely need to finish their initial simpset configuration as before.  INCOMPATIBILITY.


*** HOL ***

* Symbolic syntax of Hilbert Choice Operator is now as follows:

  syntax (epsilon)
    "_Eps" :: "[pttrn, bool] => 'a"    ("(3\<some>_./ _)" [0, 10] 10)

The symbol \<some> is displayed as the alternative epsilon of LaTeX and x-symbol; use option '-m epsilon' to get it actually printed.  Moreover, the mathematically important symbolic identifier \<epsilon> becomes available as variable, constant etc.  INCOMPATIBILITY.

* "x > y" abbreviates "y < x" and "x >= y" abbreviates "y <= x".  Similarly for all quantifiers: "ALL x > y" etc.  The x-symbol for >= is \<ge>.  New transitivity rules have been added to HOL/Orderings.thy to support corresponding Isar calculations.

* "{x:A. P}" abbreviates "{x. x:A & P}", and similarly for "\<in>" instead of ":".

* theory SetInterval: changed the syntax for open intervals:

    Old       New
    {..n(}    {..<n}
    {)n..}    {n<..}
    {m..n(}   {m..<n}
    {)m..n}   {m<..n}
    {)m..n(}  {m<..<n}

  The old syntax is still supported but will disappear in the next release.  For conversion use the following Emacs search and replace patterns (these are not perfect but work quite well):

    {)\([^\.]*\)\.\.  ->  {\1<\.\.
    \.\.\([^(}]*\)(}  ->  \.\.<\1}

* Theory Commutative_Ring (in Library): method comm_ring for proving equalities in commutative rings; method 'algebra' provides a generic interface.

* Theory Finite_Set: changed the syntax for 'setsum', summation over finite sets: "setsum (%x. e) A", which used to be "\<Sum>x:A. e", is now either "SUM x:A. e" or "\<Sum>x \<in> A. e".  The bound variable can be a tuple pattern.  Some new syntax forms are available:

    "\<Sum>x | P. e"      for  "setsum (%x. e) {x. P}"
    "\<Sum>x = a..b. e"   for  "setsum (%x. e) {a..b}"
    "\<Sum>x = a..<b. e"  for  "setsum (%x. e) {a..<b}"
    "\<Sum>x < k. e"      for  "setsum (%x. e) {..<k}"

  The latter form "\<Sum>x < k. e" used to be based on a separate function "Summation", which has been discontinued.

* theory Finite_Set: in structured induction proofs, the insert case is now 'case (insert x F)' instead of the old counterintuitive 'case (insert F x)'.

* The 'refute' command has been extended to support a much larger fragment of HOL, including axiomatic type classes, constdefs and typedefs, inductive datatypes and recursion.

* New tactics 'sat' and 'satx' to prove propositional tautologies.  Requires zChaff with proof generation to be installed.  See HOL/ex/SAT_Examples.thy for examples.

* Datatype induction via method 'induct' now preserves the name of the induction variable.  For example, when proving P(xs::'a list) by induction on xs, the induction step is now P(xs) ==> P(a#xs) rather than P(list) ==> P(a#list) as previously.  Potential INCOMPATIBILITY in unstructured proof scripts.

* Reworked implementation of records.  Improved scalability for records with many fields, avoiding performance problems for type inference.  Records are no longer composed of nested field types, but of nested extension types.  Therefore the record type only grows linear in the number of extensions and not in the number of fields.  The top-level (user's) view on records is preserved.  Potential INCOMPATIBILITY only in strange cases, where the theory depends on the old record representation.  The type generated for a record is called <record_name>_ext_type.

Flag record_quick_and_dirty_sensitive can be enabled to skip the proofs triggered by a record definition or a simproc (if quick_and_dirty is enabled).  Definitions of large records can take quite long.

New simproc record_upd_simproc for simplification of multiple record updates enabled by default.  Moreover, trivial updates are also removed: r(|x := x r|) = r.  INCOMPATIBILITY: old proofs break occasionally, since simplification is more powerful by default.

* typedef: proper support for polymorphic sets, which contain extra type-variables in the term.

* Simplifier: automatically reasons about transitivity chains involving "trancl" (r^+) and "rtrancl" (r^*) by setting up tactics provided by Provers/trancl.ML as additional solvers.  INCOMPATIBILITY: old proofs break occasionally as simplification may now solve more goals than previously.

* Simplifier: converts x <= y into x = y if assumption y <= x is present.  Works for all partial orders (class "order"), in particular numbers and sets.  For linear orders (e.g. numbers) it treats ~ x < y just like y <= x.

* Simplifier: new simproc for "let x = a in f x".  If a is a free or bound variable or a constant then the let is unfolded.  Otherwise first a is simplified to b, and then f b is simplified to g.  If possible we abstract b from g arriving at "let x = b in h x", otherwise we unfold the let and arrive at g.  The simproc can be enabled/disabled by the reference use_let_simproc.  Potential INCOMPATIBILITY since simplification is more powerful by default.  (A small sketch follows below.)
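The sketch promised above, showing the "let" simproc at work; the lemma is illustrative only.  Here y is a free variable, so the let is unfolded outright:

  lemma "(let x = y in x # x # zs) = y # y # zs"
    by simp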
* Classical reasoning: the meson method now accepts theorems as arguments.

* Prover support: pre-release of the Isabelle-ATP linkup, which runs background jobs to provide advice on the provability of subgoals.

* Theory OrderedGroup and Ring_and_Field: various additions and improvements to facilitate calculations involving equalities and inequalities. The following theorems have been eliminated or modified (INCOMPATIBILITY):

    abs_eq            now named abs_of_nonneg
    abs_of_ge_0       now named abs_of_nonneg
    abs_minus_eq      now named abs_of_nonpos
    imp_abs_id        now named abs_of_nonneg
    imp_abs_neg_id    now named abs_of_nonpos
    mult_pos          now named mult_pos_pos
    mult_pos_le       now named mult_nonneg_nonneg
    mult_pos_neg_le   now named mult_nonneg_nonpos
    mult_pos_neg2_le  now named mult_nonneg_nonpos2
    mult_neg          now named mult_neg_neg
    mult_neg_le       now named mult_nonpos_nonpos

* The following lemmas in Ring_and_Field have been added to the simplifier:

    zero_le_square
    not_square_less_zero

  The following lemmas have been deleted from Real/RealPow:

    realpow_zero_zero
    realpow_two
    realpow_less
    zero_le_power
    realpow_two_le
    abs_realpow_two
    realpow_two_abs

* Theory Parity: added rules for simplifying exponents.

* Theory List: The following theorems have been eliminated or modified (INCOMPATIBILITY):

    list_all_Nil   now named list_all.simps(1)
    list_all_Cons  now named list_all.simps(2)
    list_all_conv  now named list_all_iff
    set_mem_eq     now named mem_iff

* Theories SetsAndFunctions and BigO (see HOL/Library) support asymptotic "big O" calculations. See the notes in BigO.thy.

*** HOL-Complex ***

* Theory RealDef: better support for embedding natural numbers and integers in the reals. The following theorems have been eliminated or modified (INCOMPATIBILITY):

    exp_ge_add_one_self  now requires no hypotheses
    real_of_int_add      reversed direction of equality (use [symmetric])
    real_of_int_minus    reversed direction of equality (use [symmetric])
    real_of_int_diff     reversed direction of equality (use [symmetric])
    real_of_int_mult     reversed direction of equality (use [symmetric])

* Theory RComplete: expanded support for floor and ceiling functions.

* Theory Ln is new, with properties of the natural logarithm.

* Hyperreal: There is a new type constructor "star" for making nonstandard types.
The old type names are now type synonyms:

    hypreal  = real star
    hypnat   = nat star
    hcomplex = complex star

* Hyperreal: Many groups of similarly-defined constants have been replaced by polymorphic versions (INCOMPATIBILITY):

    star_of       <--  hypreal_of_real, hypnat_of_nat, hcomplex_of_complex
    starset       <--  starsetNat, starsetC
    *s*           <--  *sNat*, *sc*
    starset_n     <--  starsetNat_n, starsetC_n
    *sn*          <--  *sNatn*, *scn*
    InternalSets  <--  InternalNatSets, InternalCSets
    starfun       <--  starfun{Nat,Nat2,C,RC,CR}
    *f*           <--  *fNat*, *fNat2*, *fc*, *fRc*, *fcR*
    starfun_n     <--  starfun{Nat,Nat2,C,RC,CR}_n
    *fn*          <--  *fNatn*, *fNat2n*, *fcn*, *fRcn*, *fcRn*
    InternalFuns  <--  InternalNatFuns, InternalNatFuns2, Internal{C,RC,CR}Funs

* Hyperreal: Many type-specific theorems have been removed in favor of theorems specific to various axiomatic type classes (INCOMPATIBILITY):

    add_commute            <--  {hypreal,hypnat,hcomplex}_add_commute
    add_assoc              <--  {hypreal,hypnat,hcomplex}_add_assocs
    OrderedGroup.add_0     <--  {hypreal,hypnat,hcomplex}_add_zero_left
    OrderedGroup.add_0_right  <--  {hypreal,hcomplex}_add_zero_right
    right_minus            <--  hypreal_add_minus
    left_minus             <--  {hypreal,hcomplex}_add_minus_left
    mult_commute           <--  {hypreal,hypnat,hcomplex}_mult_commute
    mult_assoc             <--  {hypreal,hypnat,hcomplex}_mult_assoc
    mult_1_left            <--  {hypreal,hypnat}_mult_1, hcomplex_mult_one_left
    mult_1_right           <--  hcomplex_mult_one_right
    mult_zero_left         <--  hcomplex_mult_zero_left
    left_distrib           <--  {hypreal,hypnat,hcomplex}_add_mult_distrib
    right_distrib          <--  hypnat_add_mult_distrib2
    zero_neq_one           <--  {hypreal,hypnat,hcomplex}_zero_not_eq_one
    right_inverse          <--  hypreal_mult_inverse
    left_inverse           <--  hypreal_mult_inverse_left, hcomplex_mult_inv_left
    order_refl             <--  {hypreal,hypnat}_le_refl
    order_trans            <--  {hypreal,hypnat}_le_trans
    order_antisym          <--  {hypreal,hypnat}_le_anti_sym
    order_less_le          <--  {hypreal,hypnat}_less_le
    linorder_linear        <--  {hypreal,hypnat}_le_linear
    add_left_mono          <--  {hypreal,hypnat}_add_left_mono
    mult_strict_left_mono  <--  {hypreal,hypnat}_mult_less_mono2
    add_nonneg_nonneg      <--  hypreal_le_add_order

* Hyperreal: Separate theorems having to do with type-specific versions of constants have been merged into theorems that apply to the new polymorphic constants (INCOMPATIBILITY):

    STAR_UNIV_set     <--  {STAR_real,NatStar_real,STARC_complex}_set
    STAR_empty_set    <--  {STAR,NatStar,STARC}_empty_set
    STAR_Un           <--  {STAR,NatStar,STARC}_Un
    STAR_Int          <--  {STAR,NatStar,STARC}_Int
    STAR_Compl        <--  {STAR,NatStar,STARC}_Compl
    STAR_subset       <--  {STAR,NatStar,STARC}_subset
    STAR_mem          <--  {STAR,NatStar,STARC}_mem
    STAR_mem_Compl    <--  {STAR,STARC}_mem_Compl
    STAR_diff         <--  {STAR,STARC}_diff
    STAR_star_of_image_subset  <--  {STAR_hypreal_of_real, NatStar_hypreal_of_real, STARC_hcomplex_of_complex}_image_subset
    starset_n_Un      <--  starset{Nat,C}_n_Un
    starset_n_Int     <--  starset{Nat,C}_n_Int
    starset_n_Compl   <--  starset{Nat,C}_n_Compl
    starset_n_diff    <--  starset{Nat,C}_n_diff
    InternalSets_Un   <--  Internal{Nat,C}Sets_Un
    InternalSets_Int  <--  Internal{Nat,C}Sets_Int
    InternalSets_Compl  <--  Internal{Nat,C}Sets_Compl
    InternalSets_diff   <--  Internal{Nat,C}Sets_diff
    InternalSets_UNIV_diff  <--  Internal{Nat,C}Sets_UNIV_diff
    InternalSets_starset_n  <--  Internal{Nat,C}Sets_starset{Nat,C}_n
    starset_starset_n_eq  <--  starset{Nat,C}_starset{Nat,C}_n_eq
    starset_n_starset  <--  starset{Nat,C}_n_starset{Nat,C}
    starfun_n_starfun  <--  starfun{Nat,Nat2,C,RC,CR}_n_starfun{Nat,Nat2,C,RC,CR}
    starfun           <--  starfun{Nat,Nat2,C,RC,CR}
    starfun_mult      <--  starfun{Nat,Nat2,C,RC,CR}_mult
    starfun_add       <--  starfun{Nat,Nat2,C,RC,CR}_add
    starfun_minus     <--  starfun{Nat,Nat2,C,RC,CR}_minus
    starfun_diff      <--  starfun{C,RC,CR}_diff
    starfun_o         <--  starfun{NatNat2,Nat2,_stafunNat,C,C_starfunRC,_starfunCR}_o
    starfun_o2        <--  starfun{NatNat2,_stafunNat,C,C_starfunRC,_starfunCR}_o2
    starfun_const_fun <--  starfun{Nat,Nat2,C,RC,CR}_const_fun
    starfun_inverse   <--  starfun{Nat,C,RC,CR}_inverse
    starfun_eq        <--  starfun{Nat,Nat2,C,RC,CR}_eq
    starfun_eq_iff    <--  starfun{C,RC,CR}_eq_iff
    starfun_Id        <--  starfunC_Id
    starfun_approx    <--  starfun{Nat,CR}_approx
    starfun_capprox   <--  starfun{C,RC}_capprox
    starfun_abs       <--  starfunNat_rabs
    starfun_lambda_cancel   <--  starfun{C,CR,RC}_lambda_cancel
    starfun_lambda_cancel2  <--  starfun{C,CR,RC}_lambda_cancel2
    starfun_mult_HFinite_approx   <--  starfunCR_mult_HFinite_capprox
    starfun_mult_CFinite_capprox  <--  starfun{C,RC}_mult_CFinite_capprox
    starfun_add_capprox     <--  starfun{C,RC}_add_capprox
    starfun_add_approx      <--  starfunCR_add_approx
    starfun_inverse_inverse <--  starfunC_inverse_inverse
    starfun_divide    <--  starfun{C,CR,RC}_divide
    starfun_n         <--  starfun{Nat,C}_n
    starfun_n_mult    <--  starfun{Nat,C}_n_mult
    starfun_n_add     <--  starfun{Nat,C}_n_add
    starfun_n_add_minus  <--  starfunNat_n_add_minus
    starfun_n_const_fun  <--  starfun{Nat,C}_n_const_fun
    starfun_n_minus   <--  starfun{Nat,C}_n_minus
    starfun_n_eq      <--  starfun{Nat,C}_n_eq
    star_n_add        <--  {hypreal,hypnat,hcomplex}_add
    star_n_minus      <--  {hypreal,hcomplex}_minus
    star_n_diff       <--  {hypreal,hcomplex}_diff
    star_n_mult       <--  {hypreal,hcomplex}_mult
    star_n_inverse    <--  {hypreal,hcomplex}_inverse
    star_n_le         <--  {hypreal,hypnat}_le
    star_n_less       <--  {hypreal,hypnat}_less
    star_n_zero_num   <--  {hypreal,hypnat,hcomplex}_zero_num
    star_n_one_num    <--  {hypreal,hypnat,hcomplex}_one_num
    star_n_abs        <--  hypreal_hrabs
    star_n_divide     <--  hcomplex_divide
    star_of_add       <--  {hypreal_of_real,hypnat_of_nat,hcomplex_of_complex}_add
    star_of_minus     <--  {hypreal_of_real,hcomplex_of_complex}_minus
    star_of_diff      <--  hypreal_of_real_diff
    star_of_mult      <--  {hypreal_of_real,hypnat_of_nat,hcomplex_of_complex}_mult
    star_of_one       <--  {hypreal_of_real,hcomplex_of_complex}_one
    star_of_zero      <--  {hypreal_of_real,hypnat_of_nat,hcomplex_of_complex}_zero
    star_of_le        <--  {hypreal_of_real,hypnat_of_nat}_le_iff
    star_of_less      <--  {hypreal_of_real,hypnat_of_nat}_less_iff
    star_of_eq        <--  {hypreal_of_real,hypnat_of_nat,hcomplex_of_complex}_eq_iff
    star_of_inverse   <--  {hypreal_of_real,hcomplex_of_complex}_inverse
    star_of_divide    <--  {hypreal_of_real,hcomplex_of_complex}_divide
    star_of_of_nat    <--  {hypreal_of_real,hcomplex_of_complex}_of_nat
    star_of_of_int    <--  {hypreal_of_real,hcomplex_of_complex}_of_int
    star_of_number_of    <--  {hypreal,hcomplex}_number_of
    star_of_number_less  <--  number_of_less_hypreal_of_real_iff
    star_of_number_le    <--  number_of_le_hypreal_of_real_iff
    star_of_eq_number    <--  hypreal_of_real_eq_number_of_iff
    star_of_less_number  <--  hypreal_of_real_less_number_of_iff
    star_of_le_number    <--  hypreal_of_real_le_number_of_iff
    star_of_power     <--  hypreal_of_real_power
    star_of_eq_0      <--  hcomplex_of_complex_zero_iff

* Hyperreal: new method "transfer" that implements the transfer principle of nonstandard analysis. With a subgoal that mentions nonstandard types like "'a star", the command "apply transfer" replaces it with an equivalent one that mentions only standard types. To be successful, all free variables must have standard types; nonstandard variables must have explicit universal quantifiers (see the sketch below).

* Hyperreal: A theory of Taylor series.
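Referring to the "transfer" entry above, a minimal usage sketch. The lemma is hypothetical, and the exact behaviour of the method should be checked against the HOL-Complex sources of this release; note the explicit quantifier over the nonstandard variables, as required above:

    lemma "ALL x y :: hypreal. x + y = y + x"
      apply transfer   (* the goal now mentions type real only *)
      apply (simp add: add_commute)
      done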
*** HOLCF ***

* Discontinued special version of 'constdefs' (which used to support continuous functions) in favor of the general Pure one with full type-inference.

* New simplification procedure for solving continuity conditions; it is much faster on terms with many nested lambda abstractions (cubic instead of exponential time).

* New syntax for domain package: selector names are now optional. Parentheses should be omitted unless the argument is lazy, for example:

    domain 'a stream = cons "'a" (lazy "'a stream")

* New command 'fixrec' for defining recursive functions with pattern matching; defining multiple functions with mutual recursion is also supported. Patterns may include the constants cpair, spair, up, sinl, sinr, or any data constructor defined by the domain package. The given equations are proven as rewrite rules. See HOLCF/ex/Fixrec_ex.thy for syntax and examples.

* New commands 'cpodef' and 'pcpodef' for defining predicate subtypes of cpo and pcpo types. Syntax is exactly like the 'typedef' command, but the proof obligation additionally includes an admissibility requirement. The packages generate instances of class cpo or pcpo, with continuity and strictness theorems for Rep and Abs.

* HOLCF: Many theorems have been renamed according to a more standard naming scheme (INCOMPATIBILITY):

    foo_inject:      "foo$x = foo$y ==> x = y"
    foo_eq:          "(foo$x = foo$y) = (x = y)"
    foo_less:        "(foo$x << foo$y) = (x << y)"
    foo_strict:      "foo$UU = UU"
    foo_defined:     "... ==> foo$x ~= UU"
    foo_defined_iff: "(foo$x = UU) = (x = UU)"

*** ZF ***

* ZF/ex: theories Group and Ring provide examples in abstract algebra, including the First Isomorphism Theorem (on quotienting by the kernel of a homomorphism).

* ZF/Simplifier: install second copy of type solver that actually makes use of TC rules declared to Isar proof contexts (or locales); the old version is still required for ML proof scripts.

*** Cube ***

* Converted to Isar theory format; use locales instead of axiomatic theories.

*** ML ***

* Pure/library.ML: added ##>, ##>>, #>> -- higher-order counterparts for ||>, ||>>, |>>.

* Pure/library.ML no longer defines its own option datatype, but uses that of the SML basis, which has constructors NONE and SOME instead of None and Some, as well as exception Option.Option instead of OPTION. The functions the, if_none, is_some, is_none have been adapted accordingly, while Option.map replaces apsome.

* Pure/library.ML: the exception LIST has been given up in favour of the standard exceptions Empty and Subscript, as well as Library.UnequalLengths. Functions like Library.hd and Library.tl are superseded by the standard hd and tl functions etc. A number of basic list functions are no longer exported to the ML toplevel, as they are variants of predefined functions. The following suggests how one can translate existing code:

    rev_append xs ys  =  List.revAppend (xs, ys)
    nth_elem (i, xs)  =  List.nth (xs, i)
    last_elem xs      =  List.last xs
    flat xss          =  List.concat xss
    seq fs            =  List.app fs
    partition P xs    =  List.partition P xs
    mapfilter f xs    =  List.mapPartial f xs

* Pure/library.ML: several combinators for linear functional transformations, notably reverse application and composition (see the usage sketch below):

    x |> f          f #> g
    (x, y) |-> f    f #-> g

* Pure/library.ML: introduced/changed precedence of infix operators:

    infix 1 |> |-> ||> ||>> |>> |>>> #> #->;
    infix 2 ?;
    infix 3 o oo ooo oooo;
    infix 4 ~~ upto downto;

  Maybe INCOMPATIBILITY when any of those is used in conjunction with other infix operators.
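A small illustration of the combinators above in Isabelle/ML (the values are hypothetical toy examples):

    (* reverse application threads a value through successive transformations *)
    val i = 1 |> (fn n => n + 2) |> (fn n => n * 2);    (* i = 6 *)

    (* forward composition builds the same pipeline as a function *)
    val f = (fn n => n + 2) #> (fn n => n * 2);         (* f 1 = 6 *)

    (* |-> applies a curried function to the components of a pair *)
    val j = (1, 2) |-> (fn x => fn y => x + y);         (* j = 3 *)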
* Pure/library.ML: natural list combinators fold, fold_rev, and fold_map support linear functional transformations and nesting. For example (see also the sketch at the end of this group of entries):

    fold f [x1, ..., xN] y = y |> f x1 |> ... |> f xN
    (fold o fold) f [xs1, ..., xsN] y = y |> fold f xs1 |> ... |> fold f xsN
    fold f [x1, ..., xN] = f x1 #> ... #> f xN
    (fold o fold) f [xs1, ..., xsN] = fold f xs1 #> ... #> fold f xsN

* Pure/library.ML: the following selectors on type 'a option are available:

    the:          'a option -> 'a   (*partial*)
    these:        'a option -> 'a   where 'a = 'b list
    the_default:  'a -> 'a option -> 'a
    the_list:     'a option -> 'a list

* Pure/General: structure AList (cf. Pure/General/alist.ML) provides basic operations for association lists, following natural argument order; moreover the explicit equality predicate passed here avoids potentially expensive polymorphic runtime equality checks. The old functions may be expressed as follows:

    assoc     = uncurry (AList.lookup (op =))
    assocs    = these oo AList.lookup (op =)
    overwrite = uncurry (AList.update (op =)) o swap

* Pure/General: structure AList (cf. Pure/General/alist.ML) provides

    val make: ('a -> 'b) -> 'a list -> ('a * 'b) list
    val find: ('a * 'b -> bool) -> ('c * 'b) list -> 'a -> 'c list

  replacing make_keylist and keyfilter (occasionally used). Naive rewrites:

    make_keylist = AList.make
    keyfilter    = AList.find (op =)

* eq_fst and eq_snd now take an explicit equality parameter, thus avoiding eqtypes. Naive rewrites:

    eq_fst = eq_fst (op =)
    eq_snd = eq_snd (op =)

* Removed deprecated apl and apr (rarely used). Naive rewrites:

    apl (n, op) =>>= curry op n
    apr (op, m) =>>= fn n => op (n, m)

* Pure/General: structure OrdList (cf. Pure/General/ord_list.ML) provides a reasonably efficient light-weight implementation of sets as lists.

* Pure/General: generic tables (cf. Pure/General/table.ML) provide a few new operations; existing lookup and update are now curried to follow natural argument order (for use with fold etc.); INCOMPATIBILITY, use (uncurry Symtab.lookup) etc. as last resort.

* Pure/General: output via the Isabelle channels of writeln/warning/error etc. is now passed through Output.output, with a hook for arbitrary transformations depending on the print_mode (cf. Output.add_mode -- the first active mode that provides an output function wins). Already formatted output may be embedded into further text via Output.raw; the result of Pretty.string_of/str_of and derived functions (string_of_term/cterm/thm etc.) is already marked raw to accommodate easy composition of diagnostic messages etc. Programmers rarely need to care about Output.output or Output.raw at all, with some notable exceptions: Output.output is required when bypassing the standard channels (writeln etc.), or in token translations to produce properly formatted results; Output.raw is required when capturing already output material that will eventually be presented to the user a second time. For the default print mode, both Output.output and Output.raw have no effect.

* Pure/General: Output.time_accumulator NAME creates an operator ('a -> 'b) -> 'a -> 'b to measure runtime and count invocations; the cumulative results are displayed at the end of a batch session.

* Pure/General: File.sysify_path and File.quote_sysify_path have been replaced by File.platform_path and File.shell_path (with appropriate hooks). This provides a clean interface for unusual systems where the internal and external process views of file names differ.
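A sketch of the fold and AList operations described above (Isabelle/ML; the values are hypothetical toy examples):

    (* fold threads a state through a list; fold o fold through nested lists *)
    val sum  = fold (fn x => fn s => s + x) [1, 2, 3] 0;                (* 6 *)
    val sum2 = (fold o fold) (fn x => fn s => s + x) [[1, 2], [3]] 0;   (* 6 *)

    (* association lists with an explicit equality predicate *)
    val tab = AList.update (op =) ("a", 1) [];
    val v   = AList.lookup (op =) tab "a";                              (* SOME 1 *)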
* Pure: more efficient orders for basic syntactic entities: added fast_string_ord, fast_indexname_ord, fast_term_ord; changed sort_ord and typ_ord to use fast_string_ord and fast_indexname_ord (term_ord is NOT affected); structures Symtab, Vartab, Typtab, Termtab use the fast orders now -- potential INCOMPATIBILITY for code that depends on a particular order for Symtab.keys, Symtab.dest, etc. (consider using Library.sort_strings on the result).

* Pure/term.ML: combinators fold_atyps, fold_aterms, fold_term_types, fold_types traverse types/terms from left to right, observing natural argument order. Supersedes previous foldl_XXX versions; add_frees, add_vars etc. have been adapted as well: INCOMPATIBILITY.

* Pure: name spaces have been refined, with significant changes of the internal interfaces -- INCOMPATIBILITY. Renamed cond_extern(_table) to extern(_table). The plain name entry path is superseded by a general 'naming' context, which also includes the 'policy' to produce a fully qualified name and external accesses of a fully qualified name; NameSpace.extend is superseded by the context-dependent Sign.declare_name. Several theory and proof context operations modify the naming context. Especially note Theory.restore_naming and ProofContext.restore_naming to get back to a sane state; note that Theory.add_path is no longer sufficient to recover from Theory.absolute_path in particular.

* Pure: new flags short_names (default false) and unique_names (default true) for controlling output of qualified names. If short_names is set, names are printed unqualified. If unique_names is reset, the name prefix is reduced to the minimum required to achieve the original result when interning again, even if there is an overlap with earlier declarations.

* Pure/TheoryDataFun: change of the argument structure; 'prep_ext' is now 'extend', and 'merge' gets an additional Pretty.pp argument (useful for printing error messages). INCOMPATIBILITY.

* Pure: major reorganization of the theory context. Type Sign.sg and Theory.theory are now identified, referring to the universal Context.theory (see Pure/context.ML). Actual signature and theory content is managed as theory data. The old code and interfaces were spread over many files and structures; the new arrangement introduces considerable INCOMPATIBILITY to gain more clarity:

    Context -- theory management operations (name, identity, inclusion, parents, ancestors, merge, etc.), plus generic theory data;

    Sign -- logical signature and syntax operations (declaring consts, types, etc.), plus certify/read for common entities;

    Theory -- logical theory operations (stating axioms, definitions, oracles), plus a copy of logical signature operations (consts, types, etc.); also a few basic management operations (Theory.copy, Theory.merge, etc.)

  The most basic sign_of operations (Theory.sign_of, Thm.sign_of_thm etc.) as well as the sign field in Thm.rep_thm etc. have been retained for convenience -- they merely return the theory.

* Pure: type Type.tsig is superseded by theory in most interfaces.

* Pure: the Isar proof context type is already defined early in Pure as Context.proof (note that ProofContext.context and Proof.context are aliases, where the latter is the preferred name). This enables other Isabelle components to refer to that type even before Isar is present.

* Pure/sign/theory: discontinued named name spaces (i.e. classK, typeK, constK, axiomK, oracleK), but provide explicit operations for any of these kinds.
  For example, Sign.intern typeK is now Sign.intern_type, and Theory.hide_space Sign.typeK is now Theory.hide_types. Also note that former Theory.hide_classes/types/consts are now Theory.hide_classes_i/types_i/consts_i, while the non-'_i' versions internalize their arguments! INCOMPATIBILITY.

* Pure: the get_thm interface (of PureThy and ProofContext) expects datatype thmref (with constructors Name and NameSelection) instead of a plain string -- INCOMPATIBILITY.

* Pure: cases produced by proof methods specify options, where NONE means to remove case bindings -- INCOMPATIBILITY in (RAW_)METHOD_CASES.

* Pure: the following operations retrieve axioms or theorems from a theory node or theory hierarchy, respectively:

    Theory.axioms_of: theory -> (string * term) list
    Theory.all_axioms_of: theory -> (string * term) list
    PureThy.thms_of: theory -> (string * thm) list
    PureThy.all_thms_of: theory -> (string * thm) list

* Pure: print_tac now outputs the goal through the trace channel.

* Isar toplevel: improved diagnostics, mostly for Poly/ML only. Reference Toplevel.debug (default false) controls detailed printing and tracing of low-level exceptions; Toplevel.profiling (default 0) controls execution profiling -- set to 1 for time and 2 for space (both increase the runtime).

* Isar session: The initial use of ROOT.ML is now always timed, i.e. the log will show the actual process times, in contrast to the elapsed wall-clock time that the outer shell wrapper produces.

* Simplifier: improved handling of bound variables (nameless representation, avoiding the allocation of new strings). Simprocs that invoke the Simplifier recursively should use Simplifier.inherit_bounds to avoid local name clashes. Failure to do so produces warnings "Simplifier: renamed bound variable ..."; set Simplifier.debug_bounds for further details.

* ML functions legacy_bindings and use_legacy_bindings produce ML fact bindings for all theorems stored within a given theory; this may help in porting non-Isar theories to Isar ones, while keeping ML proof scripts for the time being.

* ML operator HTML.with_charset specifies the charset being used for generated HTML files. For example:

    HTML.with_charset "utf-8" use_thy "Hebrew";
    HTML.with_charset "utf-8" use_thy "Chinese";

*** System ***

* Allow symlinks to all proper Isabelle executables (Isabelle, isabelle, isatool etc.).

* ISABELLE_DOC_FORMAT setting specifies the preferred document format (for isatool doc, isatool mkdir, display_drafts etc.).

* isatool usedir: option -f allows specification of the ML file to be used by Isabelle; default is ROOT.ML.

* New isatool version outputs the version identifier of the Isabelle distribution being used.

* HOL: new isatool dimacs2hol converts files in DIMACS CNF format (containing Boolean satisfiability problems) into Isabelle/HOL theories.


New in Isabelle2004 (April 2004)
--------------------------------

*** General ***

* Provers/order.ML: new efficient reasoner for partial and linear orders. Replaces linorder.ML.

* Pure: Greek letters (except small lambda, \), as well as Gothic (\...\\...\), calligraphic (\...\), and Euler (\...\), are now considered normal letters, and can therefore be used anywhere an ASCII letter (a...zA...Z) could be used until now. COMPATIBILITY: This obviously changes the parsing of some terms, especially where a symbol has been used as a binder, say '\x. ...', which is now a type error since \x will be parsed as an identifier. Fix it by inserting a space around former symbols. Call 'isatool fixgreek' to try to fix parsing errors in existing theory and ML files.

* Pure: Macintosh and Windows line-breaks are now allowed in theory files.

* Pure: single-letter sub/superscripts (\<^isub> and \<^isup>) are now allowed in identifiers. Similar to Greek letters, \<^isub> is now considered a normal (but invisible) letter. For multiple-letter subscripts repeat \<^isub> like this: x\<^isub>1\<^isub>2.

* Pure: There are now sub-/superscripts that can span more than one character. Text between \<^bsub> and \<^esub> is set in subscript in ProofGeneral and LaTeX, text between \<^bsup> and \<^esup> in superscript. The new control characters are not identifier parts.

* Pure: Control-symbols of the form \<^raw:...> will literally print the content of "..." to the LaTeX file instead of \isacntrl... . The "..." may consist of any printable characters excluding the end bracket >.

* Pure: Using the new Isar command "finalconsts" (or the ML functions Theory.add_finals or Theory.add_finals_i) it is now possible to declare constants "final", which prevents their being given a definition later. It is useful for constants whose behaviour is fixed axiomatically rather than definitionally, such as the meta-logic connectives.

* Pure: 'instance' now handles general arities with general sorts (i.e. intersections of classes).

* Presentation: generated HTML now uses a CSS style sheet to make layout (somewhat) independent of content. It is copied from lib/html/isabelle.css. It can be changed to alter the colors/layout of generated pages.

*** Isar ***

* Tactic emulation methods rule_tac, erule_tac, drule_tac, frule_tac, cut_tac, subgoal_tac and thin_tac:

  - Now understand static (Isar) contexts. As a consequence, users of Isar locales are no longer forced to write Isar proof scripts. For details see the Isar Reference Manual, paragraph 4.3.2: Further tactic emulations.

  - INCOMPATIBILITY: names of variables to be instantiated may no longer be enclosed in quotes. Instead, precede the variable name with `?'. This is consistent with the instantiation attribute "where".

* Attributes "where" and "of":

  - Now take type variables of the instantiated theorem into account when reading the instantiation string. This fixes a bug that caused instantiated theorems to have too special types in some circumstances.

  - "where" permits explicit instantiations of type variables.

* Calculation commands "moreover" and "also" no longer interfere with current facts ("this"), admitting arbitrary combinations with "then" and derived forms.

* Locales:

  - Goal statements involving the context element "includes" no longer generate theorems with internal delta predicates (those ending in "_axioms") in the premise. Resolve the particular premise with <locale>.intro to obtain the old form.

  - Fixed bug in type inference ("unify_frozen") that prevented a mix of target specification and "includes" elements in goal statements.

  - Rule sets <locale>.intro and <locale>.axioms are no longer declared as [intro?] and [elim?] (respectively) by default.

  - Experimental command for instantiation of locales in proof contexts: instantiate
: "(x, y) \ r" show "X = Y" by (rule quotient_eqI) (use \
assms in \blast+\) qed lemma eq_equiv_class_iff2: "equiv A r \ x \ A \ y \ A \ {x}//r = {y}//r \ (x, y) \ r" by (simp add: quotient_def eq_equiv_class_iff) lemma quotient_empty [simp]: "{}//r = {}" by (simp add: quotient_def) lemma quotient_is_empty [iff]: "A//r = {} \ A = {}" by (simp add: quotient_def) lemma quotient_is_empty2 [iff]: "{} = A//r \ A = {}" by (simp add: quotient_def) lemma singleton_quotient: "{x}//r = {r `` {x}}" by (simp add: quotient_def) lemma quotient_diff1: "inj_on (\a. {a}//r) A \ a \ A \ (A - {a})//r = A//r - {a}//r" unfolding quotient_def inj_on_def by blast subsection \Refinement of one equivalence relation WRT another\ lemma refines_equiv_class_eq: "R \ S \ equiv A R \ equiv A S \ R``(S``{a}) = S``{a}" by (auto simp: equiv_class_eq_iff) lemma refines_equiv_class_eq2: "R \ S \ equiv A R \ equiv A S \ S``(R``{a}) = S``{a}" by (auto simp: equiv_class_eq_iff) lemma refines_equiv_image_eq: "R \ S \ equiv A R \ equiv A S \ (\X. S``X) ` (A//R) = A//S" by (auto simp: quotient_def image_UN refines_equiv_class_eq2) lemma finite_refines_finite: "finite (A//R) \ R \ S \ equiv A R \ equiv A S \ finite (A//S)" by (erule finite_surj [where f = "\X. S``X"]) (simp add: refines_equiv_image_eq) lemma finite_refines_card_le: "finite (A//R) \ R \ S \ equiv A R \ equiv A S \ card (A//S) \ card (A//R)" by (subst refines_equiv_image_eq [of R S A, symmetric]) (auto simp: card_image_le [where f = "\X. S``X"]) subsection \Defining unary operations upon equivalence classes\ text \A congruence-preserving function.\ definition congruent :: "('a \ 'a) set \ ('a \ 'b) \ bool" where "congruent r f \ (\(y, z) \ r. f y = f z)" lemma congruentI: "(\y z. (y, z) \ r \ f y = f z) \ congruent r f" by (auto simp add: congruent_def) lemma congruentD: "congruent r f \ (y, z) \ r \ f y = f z" by (auto simp add: congruent_def) abbreviation RESPECTS :: "('a \ 'b) \ ('a \ 'a) set \ bool" (infixr "respects" 80) where "f respects r \ congruent r f" lemma UN_constant_eq: "a \ A \ \y \ A. f y = c \ (\y \ A. f y) = c" \ \lemma required to prove \UN_equiv_class\\ by auto lemma UN_equiv_class: assumes "equiv A r" "f respects r" "a \ A" shows "(\x \ r``{a}. f x) = f a" \ \Conversion rule\ proof - have \
: "\x\r `` {a}. f x = f a" using assms unfolding equiv_def congruent_def sym_def by blast show ?thesis by (iprover intro: assms UN_constant_eq [OF equiv_class_self \
]) qed lemma UN_equiv_class_type: assumes r: "equiv A r" "f respects r" and X: "X \ A//r" and AB: "\x. x \ A \ f x \ B" shows "(\x \ X. f x) \ B" using assms unfolding quotient_def by (auto simp: UN_equiv_class [OF r]) text \ Sufficient conditions for injectiveness. Could weaken premises! major premise could be an inclusion; \bcong\ could be \\y. y \ A \ f y \ B\. \ lemma UN_equiv_class_inject: assumes "equiv A r" "f respects r" and eq: "(\x \ X. f x) = (\y \ Y. f y)" and X: "X \ A//r" and Y: "Y \ A//r" and fr: "\x y. x \ A \ y \ A \ f x = f y \ (x, y) \ r" shows "X = Y" proof - obtain a b where "a \ A" and a: "X = r `` {a}" and "b \ A" and b: "Y = r `` {b}" using assms by (auto elim!: quotientE) then have "\ (f ` r `` {a}) = f a" "\ (f ` r `` {b}) = f b" by (iprover intro: UN_equiv_class [OF \equiv A r\] assms)+ then have "f a = f b" using eq unfolding a b by (iprover intro: trans sym) then have "(a,b) \ r" using fr \a \ A\ \b \ A\ by blast then show ?thesis unfolding a b by (rule equiv_class_eq [OF \equiv A r\]) qed subsection \Defining binary operations upon equivalence classes\ text \A congruence-preserving function of two arguments.\ definition congruent2 :: "('a \ 'a) set \ ('b \ 'b) set \ ('a \ 'b \ 'c) \ bool" where "congruent2 r1 r2 f \ (\(y1, z1) \ r1. \(y2, z2) \ r2. f y1 y2 = f z1 z2)" lemma congruent2I': assumes "\y1 z1 y2 z2. (y1, z1) \ r1 \ (y2, z2) \ r2 \ f y1 y2 = f z1 z2" shows "congruent2 r1 r2 f" using assms by (auto simp add: congruent2_def) lemma congruent2D: "congruent2 r1 r2 f \ (y1, z1) \ r1 \ (y2, z2) \ r2 \ f y1 y2 = f z1 z2" by (auto simp add: congruent2_def) text \Abbreviation for the common case where the relations are identical.\ abbreviation RESPECTS2:: "('a \ 'a \ 'b) \ ('a \ 'a) set \ bool" (infixr "respects2" 80) where "f respects2 r \ congruent2 r r f" lemma congruent2_implies_congruent: "equiv A r1 \ congruent2 r1 r2 f \ a \ A \ congruent r2 (f a)" unfolding congruent_def congruent2_def equiv_def refl_on_def by blast lemma congruent2_implies_congruent_UN: assumes "equiv A1 r1" "equiv A2 r2" "congruent2 r1 r2 f" "a \ A2" shows "congruent r1 (\x1. \x2 \ r2``{a}. f x1 x2)" unfolding congruent_def proof clarify fix c d assume cd: "(c,d) \ r1" then have "c \ A1" "d \ A1" using \equiv A1 r1\ by (auto elim!: equiv_type [THEN subsetD, THEN SigmaE2]) with assms show "\ (f c ` r2 `` {a}) = \ (f d ` r2 `` {a})" proof (simp add: UN_equiv_class congruent2_implies_congruent) show "f c a = f d a" using assms cd unfolding congruent2_def equiv_def refl_on_def by blast qed qed lemma UN_equiv_class2: "equiv A1 r1 \ equiv A2 r2 \ congruent2 r1 r2 f \ a1 \ A1 \ a2 \ A2 \ (\x1 \ r1``{a1}. \x2 \ r2``{a2}. f x1 x2) = f a1 a2" by (simp add: UN_equiv_class congruent2_implies_congruent congruent2_implies_congruent_UN) lemma UN_equiv_class_type2: "equiv A1 r1 \ equiv A2 r2 \ congruent2 r1 r2 f \ X1 \ A1//r1 \ X2 \ A2//r2 \ (\x1 x2. x1 \ A1 \ x2 \ A2 \ f x1 x2 \ B) \ (\x1 \ X1. \x2 \ X2. f x1 x2) \ B" unfolding quotient_def by (blast intro: UN_equiv_class_type congruent2_implies_congruent_UN congruent2_implies_congruent quotientI) lemma UN_UN_split_split_eq: "(\(x1, x2) \ X. \(y1, y2) \ Y. A x1 x2 y1 y2) = (\x \ X. \y \ Y. (\(x1, x2). (\(y1, y2). A x1 x2 y1 y2) y) x)" \ \Allows a natural expression of binary operators,\ \ \without explicit calls to \split\\ by auto lemma congruent2I: "equiv A1 r1 \ equiv A2 r2 \ (\y z w. w \ A2 \ (y,z) \ r1 \ f y w = f z w) \ (\y z w. 
w \ A1 \ (y,z) \ r2 \ f w y = f w z) \ congruent2 r1 r2 f" \ \Suggested by John Harrison -- the two subproofs may be\ \ \\<^emph>\much\ simpler than the direct proof.\ unfolding congruent2_def equiv_def refl_on_def by (blast intro: trans) lemma congruent2_commuteI: assumes equivA: "equiv A r" and commute: "\y z. y \ A \ z \ A \ f y z = f z y" and congt: "\y z w. w \ A \ (y,z) \ r \ f w y = f w z" shows "f respects2 r" proof (rule congruent2I [OF equivA equivA]) note eqv = equivA [THEN equiv_type, THEN subsetD, THEN SigmaE2] show "\y z w. \w \ A; (y, z) \ r\ \ f y w = f z w" by (iprover intro: commute [THEN trans] sym congt elim: eqv) show "\y z w. \w \ A; (y, z) \ r\ \ f w y = f w z" by (iprover intro: congt elim: eqv) qed subsection \Quotients and finiteness\ text \Suggested by Florian Kammüller\ lemma finite_quotient: assumes "finite A" "r \ A \ A" shows "finite (A//r)" \ \recall @{thm equiv_type}\ proof - have "A//r \ Pow A" using assms unfolding quotient_def by blast moreover have "finite (Pow A)" using assms by simp ultimately show ?thesis by (iprover intro: finite_subset) qed lemma finite_equiv_class: "finite A \ r \ A \ A \ X \ A//r \ finite X" unfolding quotient_def by (erule rev_finite_subset) blast lemma equiv_imp_dvd_card: assumes "finite A" "equiv A r" "\X. X \ A//r \ k dvd card X" shows "k dvd card A" proof (rule Union_quotient [THEN subst]) show "k dvd card (\ (A // r))" apply (rule dvd_partition) using assms by (auto simp: Union_quotient dest: quotient_disj) qed (use assms in blast) lemma card_quotient_disjoint: assumes "finite A" "inj_on (\x. {x} // r) A" shows "card (A//r) = card A" proof - have "\i\A. \j\A. i \ j \ r `` {j} \ r `` {i}" using assms by (fastforce simp add: quotient_def inj_on_def) with assms show ?thesis by (simp add: quotient_def card_UN_disjoint) qed text \By Jakub Kądziołka:\ lemma sum_fun_comp: assumes "finite S" "finite R" "g ` S \ R" shows "(\x \ S. f (g x)) = (\y \ R. of_nat (card {x \ S. g x = y}) * f y)" proof - let ?r = "relation_of (\p q. g p = g q) S" have eqv: "equiv S ?r" unfolding relation_of_def by (auto intro: comp_equivI) have finite: "C \ S//?r \ finite C" for C - by (fact finite_equiv_class[OF `finite S` equiv_type[OF `equiv S ?r`]]) + by (fact finite_equiv_class[OF \finite S\ equiv_type[OF \equiv S ?r\]]) have disjoint: "A \ S//?r \ B \ S//?r \ A \ B \ A \ B = {}" for A B using eqv quotient_disj by blast let ?cls = "\y. {x \ S. y = g x}" have quot_as_img: "S//?r = ?cls ` g ` S" by (auto simp add: relation_of_def quotient_def) have cls_inj: "inj_on ?cls (g ` S)" by (auto intro: inj_onI) have rest_0: "(\y \ R - g ` S. of_nat (card (?cls y)) * f y) = 0" proof - have "of_nat (card (?cls y)) * f y = 0" if asm: "y \ R - g ` S" for y proof - from asm have *: "?cls y = {}" by auto show ?thesis unfolding * by simp qed thus ?thesis by simp qed have "(\x \ S. f (g x)) = (\C \ S//?r. \x \ C. f (g x))" using eqv finite disjoint by (simp flip: sum.Union_disjoint[simplified] add: Union_quotient) also have "... = (\y \ g ` S. \x \ ?cls y. f (g x))" unfolding quot_as_img by (simp add: sum.reindex[OF cls_inj]) also have "... = (\y \ g ` S. \x \ ?cls y. f y)" by auto also have "... = (\y \ g ` S. of_nat (card (?cls y)) * f y)" by (simp flip: sum_constant) also have "... = (\y \ R. 
of_nat (card (?cls y)) * f y)" using rest_0 by (simp add: sum.subset_diff[OF \g ` S \ R\ \finite R\]) finally show ?thesis by (simp add: eq_commute) qed subsection \Projection\ definition proj :: "('b \ 'a) set \ 'b \ 'a set" where "proj r x = r `` {x}" lemma proj_preserves: "x \ A \ proj r x \ A//r" unfolding proj_def by (rule quotientI) lemma proj_in_iff: assumes "equiv A r" shows "proj r x \ A//r \ x \ A" (is "?lhs \ ?rhs") proof assume ?rhs then show ?lhs by (simp add: proj_preserves) next assume ?lhs then show ?rhs unfolding proj_def quotient_def proof clarsimp fix y assume y: "y \ A" and "r `` {x} = r `` {y}" moreover have "y \ r `` {y}" using assms y unfolding equiv_def refl_on_def by blast ultimately have "(x, y) \ r" by blast then show "x \ A" using assms unfolding equiv_def refl_on_def by blast qed qed lemma proj_iff: "equiv A r \ {x, y} \ A \ proj r x = proj r y \ (x, y) \ r" by (simp add: proj_def eq_equiv_class_iff) (* lemma in_proj: "\equiv A r; x \ A\ \ x \ proj r x" unfolding proj_def equiv_def refl_on_def by blast *) lemma proj_image: "proj r ` A = A//r" unfolding proj_def[abs_def] quotient_def by blast lemma in_quotient_imp_non_empty: "equiv A r \ X \ A//r \ X \ {}" unfolding quotient_def using equiv_class_self by fast lemma in_quotient_imp_in_rel: "equiv A r \ X \ A//r \ {x, y} \ X \ (x, y) \ r" using quotient_eq_iff[THEN iffD1] by fastforce lemma in_quotient_imp_closed: "equiv A r \ X \ A//r \ x \ X \ (x, y) \ r \ y \ X" unfolding quotient_def equiv_def trans_def by blast lemma in_quotient_imp_subset: "equiv A r \ X \ A//r \ X \ A" using in_quotient_imp_in_rel equiv_type by fastforce subsection \Equivalence relations -- predicate version\ text \Partial equivalences.\ definition part_equivp :: "('a \ 'a \ bool) \ bool" where "part_equivp R \ (\x. R x x) \ (\x y. R x y \ R x x \ R y y \ R x = R y)" \ \John-Harrison-style characterization\ lemma part_equivpI: "\x. R x x \ symp R \ transp R \ part_equivp R" by (auto simp add: part_equivp_def) (auto elim: sympE transpE) lemma part_equivpE: assumes "part_equivp R" obtains x where "R x x" and "symp R" and "transp R" proof - from assms have 1: "\x. R x x" and 2: "\x y. R x y \ R x x \ R y y \ R x = R y" unfolding part_equivp_def by blast+ from 1 obtain x where "R x x" .. moreover have "symp R" proof (rule sympI) fix x y assume "R x y" with 2 [of x y] show "R y x" by auto qed moreover have "transp R" proof (rule transpI) fix x y z assume "R x y" and "R y z" with 2 [of x y] 2 [of y z] show "R x z" by auto qed ultimately show thesis by (rule that) qed lemma part_equivp_refl_symp_transp: "part_equivp R \ (\x. R x x) \ symp R \ transp R" by (auto intro: part_equivpI elim: part_equivpE) lemma part_equivp_symp: "part_equivp R \ R x y \ R y x" by (erule part_equivpE, erule sympE) lemma part_equivp_transp: "part_equivp R \ R x y \ R y z \ R x z" by (erule part_equivpE, erule transpE) lemma part_equivp_typedef: "part_equivp R \ \d. d \ {c. \x. R x x \ c = Collect (R x)}" by (auto elim: part_equivpE) text \Total equivalences.\ definition equivp :: "('a \ 'a \ bool) \ bool" where "equivp R \ (\x y. 
R x y = (R x = R y))" \ \John-Harrison-style characterization\ lemma equivpI: "reflp R \ symp R \ transp R \ equivp R" by (auto elim: reflpE sympE transpE simp add: equivp_def) lemma equivpE: assumes "equivp R" obtains "reflp R" and "symp R" and "transp R" using assms by (auto intro!: that reflpI sympI transpI simp add: equivp_def) lemma equivp_implies_part_equivp: "equivp R \ part_equivp R" by (auto intro: part_equivpI elim: equivpE reflpE) lemma equivp_equiv: "equiv UNIV A \ equivp (\x y. (x, y) \ A)" by (auto intro!: equivI equivpI [to_set] elim!: equivE equivpE [to_set]) lemma equivp_reflp_symp_transp: "equivp R \ reflp R \ symp R \ transp R" by (auto intro: equivpI elim: equivpE) lemma identity_equivp: "equivp (=)" by (auto intro: equivpI reflpI sympI transpI) lemma equivp_reflp: "equivp R \ R x x" by (erule equivpE, erule reflpE) lemma equivp_symp: "equivp R \ R x y \ R y x" by (erule equivpE, erule sympE) lemma equivp_transp: "equivp R \ R x y \ R y z \ R x z" by (erule equivpE, erule transpE) lemma equivp_rtranclp: "symp r \ equivp r\<^sup>*\<^sup>*" by(intro equivpI reflpI sympI transpI)(auto dest: sympD[OF symp_rtranclp]) lemmas equivp_rtranclp_symclp [simp] = equivp_rtranclp[OF symp_symclp] lemma equivp_vimage2p: "equivp R \ equivp (vimage2p f f R)" by(auto simp add: equivp_def vimage2p_def dest: fun_cong) lemma equivp_imp_transp: "equivp R \ transp R" by(simp add: equivp_reflp_symp_transp) subsection \Equivalence closure\ definition equivclp :: "('a \ 'a \ bool) \ 'a \ 'a \ bool" where "equivclp r = (symclp r)\<^sup>*\<^sup>*" lemma transp_equivclp [simp]: "transp (equivclp r)" by(simp add: equivclp_def) lemma reflp_equivclp [simp]: "reflp (equivclp r)" by(simp add: equivclp_def) lemma symp_equivclp [simp]: "symp (equivclp r)" by(simp add: equivclp_def) lemma equivp_evquivclp [simp]: "equivp (equivclp r)" by(simp add: equivpI) lemma tranclp_equivclp [simp]: "(equivclp r)\<^sup>+\<^sup>+ = equivclp r" by(simp add: equivclp_def) lemma rtranclp_equivclp [simp]: "(equivclp r)\<^sup>*\<^sup>* = equivclp r" by(simp add: equivclp_def) lemma symclp_equivclp [simp]: "symclp (equivclp r) = equivclp r" by(simp add: equivclp_def symp_symclp_eq) lemma equivclp_symclp [simp]: "equivclp (symclp r) = equivclp r" by(simp add: equivclp_def) lemma equivclp_conversep [simp]: "equivclp (conversep r) = equivclp r" by(simp add: equivclp_def) lemma equivclp_sym [sym]: "equivclp r x y \ equivclp r y x" by(rule sympD[OF symp_equivclp]) lemma equivclp_OO_equivclp_le_equivclp: "equivclp r OO equivclp r \ equivclp r" by(rule transp_relcompp_less_eq transp_equivclp)+ lemma rtranlcp_le_equivclp: "r\<^sup>*\<^sup>* \ equivclp r" unfolding equivclp_def by(rule rtranclp_mono)(simp add: symclp_pointfree) lemma rtranclp_conversep_le_equivclp: "r\\\<^sup>*\<^sup>* \ equivclp r" unfolding equivclp_def by(rule rtranclp_mono)(simp add: symclp_pointfree) lemma symclp_rtranclp_le_equivclp: "symclp r\<^sup>*\<^sup>* \ equivclp r" unfolding symclp_pointfree by(rule le_supI)(simp_all add: rtranclp_conversep[symmetric] rtranlcp_le_equivclp rtranclp_conversep_le_equivclp) lemma r_OO_conversep_into_equivclp: "r\<^sup>*\<^sup>* OO r\\\<^sup>*\<^sup>* \ equivclp r" by(blast intro: order_trans[OF _ equivclp_OO_equivclp_le_equivclp] relcompp_mono rtranlcp_le_equivclp rtranclp_conversep_le_equivclp del: predicate2I) lemma equivclp_induct [consumes 1, case_names base step, induct pred: equivclp]: assumes a: "equivclp r a b" and cases: "P a" "\y z. 
equivclp r a y \ r y z \ r z y \ P y \ P z" shows "P b" using a unfolding equivclp_def by(induction rule: rtranclp_induct; fold equivclp_def; blast intro: cases elim: symclpE) lemma converse_equivclp_induct [consumes 1, case_names base step]: assumes major: "equivclp r a b" and cases: "P b" "\y z. r y z \ r z y \ equivclp r z b \ P z \ P y" shows "P a" using major unfolding equivclp_def by(induction rule: converse_rtranclp_induct; fold equivclp_def; blast intro: cases elim: symclpE) lemma equivclp_refl [simp]: "equivclp r x x" by(rule reflpD[OF reflp_equivclp]) lemma r_into_equivclp [intro]: "r x y \ equivclp r x y" unfolding equivclp_def by(blast intro: symclpI) lemma converse_r_into_equivclp [intro]: "r y x \ equivclp r x y" unfolding equivclp_def by(blast intro: symclpI) lemma rtranclp_into_equivclp: "r\<^sup>*\<^sup>* x y \ equivclp r x y" using rtranlcp_le_equivclp[of r] by blast lemma converse_rtranclp_into_equivclp: "r\<^sup>*\<^sup>* y x \ equivclp r x y" by(blast intro: equivclp_sym rtranclp_into_equivclp) lemma equivclp_into_equivclp: "\ equivclp r a b; r b c \ r c b \ \ equivclp r a c" unfolding equivclp_def by(erule rtranclp.rtrancl_into_rtrancl)(auto intro: symclpI) lemma equivclp_trans [trans]: "\ equivclp r a b; equivclp r b c \ \ equivclp r a c" using equivclp_OO_equivclp_le_equivclp[of r] by blast hide_const (open) proj end diff --git a/src/HOL/ex/Rewrite_Examples.thy b/src/HOL/Examples/Rewrite_Examples.thy rename from src/HOL/ex/Rewrite_Examples.thy rename to src/HOL/Examples/Rewrite_Examples.thy --- a/src/HOL/ex/Rewrite_Examples.thy +++ b/src/HOL/Examples/Rewrite_Examples.thy @@ -1,300 +1,301 @@ theory Rewrite_Examples imports Main "HOL-Library.Rewrite" begin section \The rewrite Proof Method by Example\ -(* This file is intended to give an overview over - the features of the pattern-based rewrite proof method. +text\ +This theory gives an overview over the features of the pattern-based rewrite proof method. - See also https://www21.in.tum.de/~noschinl/Pattern-2014/ -*) +Documentation: @{url "https://arxiv.org/abs/2111.04082"} +\ + lemma fixes a::int and b::int and c::int assumes "P (b + a)" shows "P (a + b)" by (rewrite at "a + b" add.commute) (rule assms) (* Selecting a specific subterm in a large, ambiguous term. *) lemma fixes a b c :: int assumes "f (a - a + (a - a)) + f ( 0 + c) = f 0 + f c" shows "f (a - a + (a - a)) + f ((a - a) + c) = f 0 + f c" by (rewrite in "f _ + f \ = _" diff_self) fact lemma fixes a b c :: int assumes "f (a - a + 0 ) + f ((a - a) + c) = f 0 + f c" shows "f (a - a + (a - a)) + f ((a - a) + c) = f 0 + f c" by (rewrite at "f (_ + \) + f _ = _" diff_self) fact lemma fixes a b c :: int assumes "f ( 0 + (a - a)) + f ((a - a) + c) = f 0 + f c" shows "f (a - a + (a - a)) + f ((a - a) + c) = f 0 + f c" by (rewrite in "f (\ + _) + _ = _" diff_self) fact lemma fixes a b c :: int assumes "f (a - a + 0 ) + f ((a - a) + c) = f 0 + f c" shows "f (a - a + (a - a)) + f ((a - a) + c) = f 0 + f c" by (rewrite in "f (_ + \) + _ = _" diff_self) fact lemma fixes x y :: nat shows"x + y > c \ y + x > c" by (rewrite at "\ > c" add.commute) assumption (* We can also rewrite in the assumptions. 
*) lemma fixes x y :: nat assumes "y + x > c \ y + x > c" shows "x + y > c \ y + x > c" by (rewrite in asm add.commute) fact lemma fixes x y :: nat assumes "y + x > c \ y + x > c" shows "x + y > c \ y + x > c" by (rewrite in "x + y > c" at asm add.commute) fact lemma fixes x y :: nat assumes "y + x > c \ y + x > c" shows "x + y > c \ y + x > c" by (rewrite at "\ > c" at asm add.commute) fact lemma assumes "P {x::int. y + 1 = 1 + x}" shows "P {x::int. y + 1 = x + 1}" by (rewrite at "x+1" in "{x::int. \ }" add.commute) fact lemma assumes "P {x::int. y + 1 = 1 + x}" shows "P {x::int. y + 1 = x + 1}" by (rewrite at "any_identifier_will_work+1" in "{any_identifier_will_work::int. \ }" add.commute) fact lemma assumes "P {(x::nat, y::nat, z). x + z * 3 = Q (\s t. s * t + y - 3)}" shows "P {(x::nat, y::nat, z). x + z * 3 = Q (\s t. y + s * t - 3)}" by (rewrite at "b + d * e" in "\(a, b, c). _ = Q (\d e. \)" add.commute) fact (* This is not limited to the first assumption *) lemma assumes "PROP P \ PROP Q" shows "PROP R \ PROP P \ PROP Q" by (rewrite at asm assms) lemma assumes "PROP P \ PROP Q" shows "PROP R \ PROP R \ PROP P \ PROP Q" by (rewrite at asm assms) (* Rewriting "at asm" selects each full assumption, not any parts *) lemma assumes "(PROP P \ PROP Q) \ (PROP S \ PROP R)" shows "PROP S \ (PROP P \ PROP Q) \ PROP R" apply (rewrite at asm assms) apply assumption done (* Rewriting with conditional rewriting rules works just as well. *) lemma test_theorem: fixes x :: nat shows "x \ y \ x \ y \ x = y" by (rule Orderings.order_antisym) (* Premises of the conditional rule yield new subgoals. The assumptions of the goal are propagated into these subgoals *) lemma fixes f :: "nat \ nat" shows "f x \ 0 \ f x \ 0 \ f x = 0" apply (rewrite at "f x" to "0" test_theorem) apply assumption apply assumption apply (rule refl) done (* This holds also for rewriting in assumptions. The order of assumptions is preserved *) lemma assumes rewr: "PROP P \ PROP Q \ PROP R \ PROP R'" assumes A1: "PROP S \ PROP T \ PROP U \ PROP P" assumes A2: "PROP S \ PROP T \ PROP U \ PROP Q" assumes C: "PROP S \ PROP R' \ PROP T \ PROP U \ PROP V" shows "PROP S \ PROP R \ PROP T \ PROP U \ PROP V" apply (rewrite at asm rewr) apply (fact A1) apply (fact A2) apply (fact C) done (* Instantiation. Since all rewriting is now done via conversions, instantiation becomes fairly easy to do. *) (* We first introduce a function f and an extended version of f that is annotated with an invariant. *) fun f :: "nat \ nat" where "f n = n" definition "f_inv (I :: nat \ bool) n \ f n" lemma annotate_f: "f = f_inv I" by (simp add: f_inv_def fun_eq_iff) (* We have a lemma with a bound variable n, and want to add an invariant to f. *) lemma assumes "P (\n. f_inv (\_. True) n + 1) = x" shows "P (\n. f n + 1) = x" by (rewrite to "f_inv (\_. True)" annotate_f) fact (* We can also add an invariant that contains the variable n bound in the outer context. For this, we need to bind this variable to an identifier. *) lemma assumes "P (\n. f_inv (\x. n < x + 1) n + 1) = x" shows "P (\n. f n + 1) = x" by (rewrite in "\n. \" to "f_inv (\x. n < x + 1)" annotate_f) fact (* Any identifier will work *) lemma assumes "P (\n. f_inv (\x. n < x + 1) n + 1) = x" shows "P (\n. f n + 1) = x" by (rewrite in "\abc. \" to "f_inv (\x. abc < x + 1)" annotate_f) fact (* The "for" keyword. *) lemma assumes "P (2 + 1)" shows "\x y. P (1 + 2 :: nat)" by (rewrite in "P (1 + 2)" at for (x) add.commute) fact lemma assumes "\x y. P (y + x)" shows "\x y. 
P (x + y :: nat)" by (rewrite in "P (x + _)" at for (x y) add.commute) fact lemma assumes "\x y z. y + x + z = z + y + (x::int)" shows "\x y z. x + y + z = z + y + (x::int)" by (rewrite at "x + y" in "x + y + z" in for (x y z) add.commute) fact lemma assumes "\x y z. z + (x + y) = z + y + (x::int)" shows "\x y z. x + y + z = z + y + (x::int)" by (rewrite at "(_ + y) + z" in for (y z) add.commute) fact lemma assumes "\x y z. x + y + z = y + z + (x::int)" shows "\x y z. x + y + z = z + y + (x::int)" by (rewrite at "\ + _" at "_ = \" in for () add.commute) fact lemma assumes eq: "\x. P x \ g x = x" assumes f1: "\x. Q x \ P x" assumes f2: "\x. Q x \ x" shows "\x. Q x \ g x" apply (rewrite at "g x" in for (x) eq) apply (fact f1) apply (fact f2) done (* The for keyword can be used anywhere in the pattern where there is an \-Quantifier. *) lemma assumes "(\(x::int). x < 1 + x)" and "(x::int) + 1 > x" shows "(\(x::int). x + 1 > x) \ (x::int) + 1 > x" by (rewrite at "x + 1" in for (x) at asm add.commute) (rule assms) (* The rewrite method also has an ML interface *) lemma assumes "\a b. P ((a + 1) * (1 + b)) " shows "\a b :: nat. P ((a + 1) * (b + 1))" apply (tactic \ let val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context> (* Note that the pattern order is reversed *) val pat = [ Rewrite.For [(x, SOME \<^Type>\nat\)], Rewrite.In, Rewrite.Term (\<^Const>\plus \<^Type>\nat\ for \Free (x, \<^Type>\nat\)\ \<^term>\1 :: nat\\, [])] val to = NONE in CCONVERSION (Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute}) 1 end \) apply (fact assms) done lemma assumes "Q (\b :: int. P (\a. a + b) (\a. a + b))" shows "Q (\b :: int. P (\a. a + b) (\a. b + a))" apply (tactic \ let val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context> val pat = [ Rewrite.Concl, Rewrite.In, Rewrite.Term (Free ("Q", (\<^Type>\int\ --> TVar (("'b",0), [])) --> \<^Type>\bool\) $ Abs ("x", \<^Type>\int\, Rewrite.mk_hole 1 (\<^Type>\int\ --> TVar (("'b",0), [])) $ Bound 0), [(x, \<^Type>\int\)]), Rewrite.In, Rewrite.Term (\<^Const>\plus \<^Type>\int\ for \Free (x, \<^Type>\int\)\ \Var (("c", 0), \<^Type>\int\)\\, []) ] val to = NONE in CCONVERSION (Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute}) 1 end \) apply (fact assms) done (* There is also conversion-like rewrite function: *) ML \ val ct = \<^cprop>\Q (\b :: int. P (\a. a + b) (\a. b + a))\ val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context> val pat = [ Rewrite.Concl, Rewrite.In, Rewrite.Term (Free ("Q", (\<^typ>\int\ --> TVar (("'b",0), [])) --> \<^typ>\bool\) $ Abs ("x", \<^typ>\int\, Rewrite.mk_hole 1 (\<^typ>\int\ --> TVar (("'b",0), [])) $ Bound 0), [(x, \<^typ>\int\)]), Rewrite.In, Rewrite.Term (\<^Const>\plus \<^Type>\int\ for \Free (x, \<^Type>\int\)\ \Var (("c", 0), \<^Type>\int\)\\, []) ] val to = NONE val th = Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute} ct \ -section \Regression tests\ +text \Some regression tests\ ML \ val ct = \<^cterm>\(\b :: int. (\a. b + a))\ val (x, ctxt) = yield_singleton Variable.add_fixes "x" \<^context> val pat = [ Rewrite.In, Rewrite.Term (\<^Const>\plus \<^Type>\int\ for \Var (("c", 0), \<^Type>\int\)\ \Var (("c", 0), \<^Type>\int\)\\, []) ] val to = NONE val _ = case try (Rewrite.rewrite_conv ctxt (pat, to) @{thms add.commute}) ct of NONE => () | _ => error "should not have matched anything" \ ML \ Rewrite.params_pconv (Conv.all_conv |> K |> K) \<^context> (Vartab.empty, []) \<^cterm>\\x. 
PROP A\ \ lemma assumes eq: "PROP A \ PROP B \ PROP C" assumes f1: "PROP D \ PROP A" assumes f2: "PROP D \ PROP C" shows "\x. PROP D \ PROP B" apply (rewrite eq) apply (fact f1) apply (fact f2) done end diff --git a/src/HOL/Examples/document/root.tex b/src/HOL/Examples/document/root.tex --- a/src/HOL/Examples/document/root.tex +++ b/src/HOL/Examples/document/root.tex @@ -1,26 +1,26 @@ \documentclass[11pt,a4paper]{article} \usepackage[T1]{fontenc} \usepackage[only,bigsqcap]{stmaryrd} -\usepackage{ifthen,proof,amssymb,isabelle,isabellesym} +\usepackage{ifthen,proof,amssymb,isabelle,isabellesym,wasysym} \isabellestyle{literal} \usepackage{pdfsetup}\urlstyle{rm} \hyphenation{Isabelle} \begin{document} \title{Notable Examples in Isabelle/HOL} \maketitle \tableofcontents \parindent 0pt \parskip 0.5ex \input{session} \bibliographystyle{abbrv} \bibliography{root} \end{document} diff --git a/src/HOL/Library/Multiset.thy b/src/HOL/Library/Multiset.thy --- a/src/HOL/Library/Multiset.thy +++ b/src/HOL/Library/Multiset.thy @@ -1,3996 +1,4092 @@ (* Title: HOL/Library/Multiset.thy Author: Tobias Nipkow, Markus Wenzel, Lawrence C Paulson, Norbert Voelker Author: Andrei Popescu, TU Muenchen Author: Jasmin Blanchette, Inria, LORIA, MPII Author: Dmitriy Traytel, TU Muenchen Author: Mathias Fleury, MPII + Author: Martin Desharnais, MPI-INF Saarbruecken *) section \(Finite) Multisets\ theory Multiset -imports Cancellation + imports Cancellation begin subsection \The type of multisets\ typedef 'a multiset = \{f :: 'a \ nat. finite {x. f x > 0}}\ morphisms count Abs_multiset proof show \(\x. 0::nat) \ {f. finite {x. f x > 0}}\ by simp qed setup_lifting type_definition_multiset lemma count_Abs_multiset: \count (Abs_multiset f) = f\ if \finite {x. f x > 0}\ by (rule Abs_multiset_inverse) (simp add: that) lemma multiset_eq_iff: "M = N \ (\a. count M a = count N a)" by (simp only: count_inject [symmetric] fun_eq_iff) lemma multiset_eqI: "(\x. count A x = count B x) \ A = B" using multiset_eq_iff by auto text \Preservation of the representing set \<^term>\multiset\.\ lemma diff_preserves_multiset: \finite {x. 0 < M x - N x}\ if \finite {x. 0 < M x}\ for M N :: \'a \ nat\ using that by (rule rev_finite_subset) auto lemma filter_preserves_multiset: \finite {x. 0 < (if P x then M x else 0)}\ if \finite {x. 0 < M x}\ for M N :: \'a \ nat\ using that by (rule rev_finite_subset) auto lemmas in_multiset = diff_preserves_multiset filter_preserves_multiset subsection \Representing multisets\ text \Multiset enumeration\ instantiation multiset :: (type) cancel_comm_monoid_add begin lift_definition zero_multiset :: \'a multiset\ is \\a. 0\ by simp abbreviation empty_mset :: \'a multiset\ (\{#}\) where \empty_mset \ 0\ lift_definition plus_multiset :: \'a multiset \ 'a multiset \ 'a multiset\ is \\M N a. M a + N a\ by simp lift_definition minus_multiset :: \'a multiset \ 'a multiset \ 'a multiset\ is \\M N a. M a - N a\ by (rule diff_preserves_multiset) instance by (standard; transfer) (simp_all add: fun_eq_iff) end context begin qualified definition is_empty :: "'a multiset \ bool" where [code_abbrev]: "is_empty A \ A = {#}" end lemma add_mset_in_multiset: \finite {x. 0 < (if x = a then Suc (M x) else M x)}\ if \finite {x. 0 < M x}\ using that by (simp add: flip: insert_Collect) lift_definition add_mset :: "'a \ 'a multiset \ 'a multiset" is "\a M b. 
if b = a then Suc (M b) else M b" by (rule add_mset_in_multiset) syntax "_multiset" :: "args \ 'a multiset" ("{#(_)#}") translations "{#x, xs#}" == "CONST add_mset x {#xs#}" "{#x#}" == "CONST add_mset x {#}" lemma count_empty [simp]: "count {#} a = 0" by (simp add: zero_multiset.rep_eq) lemma count_add_mset [simp]: "count (add_mset b A) a = (if b = a then Suc (count A a) else count A a)" by (simp add: add_mset.rep_eq) lemma count_single: "count {#b#} a = (if b = a then 1 else 0)" by simp lemma add_mset_not_empty [simp]: \add_mset a A \ {#}\ and empty_not_add_mset [simp]: "{#} \ add_mset a A" by (auto simp: multiset_eq_iff) lemma add_mset_add_mset_same_iff [simp]: "add_mset a A = add_mset a B \ A = B" by (auto simp: multiset_eq_iff) lemma add_mset_commute: "add_mset x (add_mset y M) = add_mset y (add_mset x M)" by (auto simp: multiset_eq_iff) subsection \Basic operations\ subsubsection \Conversion to set and membership\ definition set_mset :: \'a multiset \ 'a set\ where \set_mset M = {x. count M x > 0}\ abbreviation member_mset :: \'a \ 'a multiset \ bool\ where \member_mset a M \ a \ set_mset M\ notation member_mset (\'(\#')\) and member_mset (\(_/ \# _)\ [50, 51] 50) notation (ASCII) member_mset (\'(:#')\) and member_mset (\(_/ :# _)\ [50, 51] 50) abbreviation not_member_mset :: \'a \ 'a multiset \ bool\ where \not_member_mset a M \ a \ set_mset M\ notation not_member_mset (\'(\#')\) and not_member_mset (\(_/ \# _)\ [50, 51] 50) notation (ASCII) not_member_mset (\'(~:#')\) and not_member_mset (\(_/ ~:# _)\ [50, 51] 50) context begin qualified abbreviation Ball :: "'a multiset \ ('a \ bool) \ bool" where "Ball M \ Set.Ball (set_mset M)" qualified abbreviation Bex :: "'a multiset \ ('a \ bool) \ bool" where "Bex M \ Set.Bex (set_mset M)" end syntax "_MBall" :: "pttrn \ 'a set \ bool \ bool" ("(3\_\#_./ _)" [0, 0, 10] 10) "_MBex" :: "pttrn \ 'a set \ bool \ bool" ("(3\_\#_./ _)" [0, 0, 10] 10) syntax (ASCII) "_MBall" :: "pttrn \ 'a set \ bool \ bool" ("(3\_:#_./ _)" [0, 0, 10] 10) "_MBex" :: "pttrn \ 'a set \ bool \ bool" ("(3\_:#_./ _)" [0, 0, 10] 10) translations "\x\#A. P" \ "CONST Multiset.Ball A (\x. P)" "\x\#A. P" \ "CONST Multiset.Bex A (\x. P)" print_translation \ [Syntax_Trans.preserve_binder_abs2_tr' \<^const_syntax>\Multiset.Ball\ \<^syntax_const>\_MBall\, Syntax_Trans.preserve_binder_abs2_tr' \<^const_syntax>\Multiset.Bex\ \<^syntax_const>\_MBex\] \ \ \to avoid eta-contraction of body\ lemma count_eq_zero_iff: "count M x = 0 \ x \# M" by (auto simp add: set_mset_def) lemma not_in_iff: "x \# M \ count M x = 0" by (auto simp add: count_eq_zero_iff) lemma count_greater_zero_iff [simp]: "count M x > 0 \ x \# M" by (auto simp add: set_mset_def) lemma count_inI: assumes "count M x = 0 \ False" shows "x \# M" proof (rule ccontr) assume "x \# M" with assms show False by (simp add: not_in_iff) qed lemma in_countE: assumes "x \# M" obtains n where "count M x = Suc n" proof - from assms have "count M x > 0" by simp then obtain n where "count M x = Suc n" using gr0_conv_Suc by blast with that show thesis . 
qed lemma count_greater_eq_Suc_zero_iff [simp]: "count M x \ Suc 0 \ x \# M" by (simp add: Suc_le_eq) lemma count_greater_eq_one_iff [simp]: "count M x \ 1 \ x \# M" by simp lemma set_mset_empty [simp]: "set_mset {#} = {}" by (simp add: set_mset_def) lemma set_mset_single: "set_mset {#b#} = {b}" by (simp add: set_mset_def) lemma set_mset_eq_empty_iff [simp]: "set_mset M = {} \ M = {#}" by (auto simp add: multiset_eq_iff count_eq_zero_iff) lemma finite_set_mset [iff]: "finite (set_mset M)" using count [of M] by simp lemma set_mset_add_mset_insert [simp]: \set_mset (add_mset a A) = insert a (set_mset A)\ by (auto simp flip: count_greater_eq_Suc_zero_iff split: if_splits) lemma multiset_nonemptyE [elim]: assumes "A \ {#}" obtains x where "x \# A" proof - have "\x. x \# A" by (rule ccontr) (insert assms, auto) with that show ?thesis by blast qed subsubsection \Union\ lemma count_union [simp]: "count (M + N) a = count M a + count N a" by (simp add: plus_multiset.rep_eq) lemma set_mset_union [simp]: "set_mset (M + N) = set_mset M \ set_mset N" by (simp only: set_eq_iff count_greater_zero_iff [symmetric] count_union) simp lemma union_mset_add_mset_left [simp]: "add_mset a A + B = add_mset a (A + B)" by (auto simp: multiset_eq_iff) lemma union_mset_add_mset_right [simp]: "A + add_mset a B = add_mset a (A + B)" by (auto simp: multiset_eq_iff) lemma add_mset_add_single: \add_mset a A = A + {#a#}\ by (subst union_mset_add_mset_right, subst add.comm_neutral) standard subsubsection \Difference\ instance multiset :: (type) comm_monoid_diff by standard (transfer; simp add: fun_eq_iff) lemma count_diff [simp]: "count (M - N) a = count M a - count N a" by (simp add: minus_multiset.rep_eq) lemma add_mset_diff_bothsides: \add_mset a M - add_mset a A = M - A\ by (auto simp: multiset_eq_iff) lemma in_diff_count: "a \# M - N \ count N a < count M a" by (simp add: set_mset_def) lemma count_in_diffI: assumes "\n. count N x = n + count M x \ False" shows "x \# M - N" proof (rule ccontr) assume "x \# M - N" then have "count N x = (count N x - count M x) + count M x" by (simp add: in_diff_count not_less) with assms show False by auto qed lemma in_diff_countE: assumes "x \# M - N" obtains n where "count M x = Suc n + count N x" proof - from assms have "count M x - count N x > 0" by (simp add: in_diff_count) then have "count M x > count N x" by simp then obtain n where "count M x = Suc n + count N x" using less_iff_Suc_add by auto with that show thesis . qed lemma in_diffD: assumes "a \# M - N" shows "a \# M" proof - have "0 \ count N a" by simp also from assms have "count N a < count M a" by (simp add: in_diff_count) finally show ?thesis by simp qed lemma set_mset_diff: "set_mset (M - N) = {a. 
count N a < count M a}" by (simp add: set_mset_def) lemma diff_empty [simp]: "M - {#} = M \ {#} - M = {#}" by rule (fact Groups.diff_zero, fact Groups.zero_diff) lemma diff_cancel: "A - A = {#}" by (fact Groups.diff_cancel) lemma diff_union_cancelR: "M + N - N = (M::'a multiset)" by (fact add_diff_cancel_right') lemma diff_union_cancelL: "N + M - N = (M::'a multiset)" by (fact add_diff_cancel_left') lemma diff_right_commute: fixes M N Q :: "'a multiset" shows "M - N - Q = M - Q - N" by (fact diff_right_commute) lemma diff_add: fixes M N Q :: "'a multiset" shows "M - (N + Q) = M - N - Q" by (rule sym) (fact diff_diff_add) lemma insert_DiffM [simp]: "x \# M \ add_mset x (M - {#x#}) = M" by (clarsimp simp: multiset_eq_iff) lemma insert_DiffM2: "x \# M \ (M - {#x#}) + {#x#} = M" by simp lemma diff_union_swap: "a \ b \ add_mset b (M - {#a#}) = add_mset b M - {#a#}" by (auto simp add: multiset_eq_iff) lemma diff_add_mset_swap [simp]: "b \# A \ add_mset b M - A = add_mset b (M - A)" by (auto simp add: multiset_eq_iff simp: not_in_iff) lemma diff_union_swap2 [simp]: "y \# M \ add_mset x M - {#y#} = add_mset x (M - {#y#})" by (metis add_mset_diff_bothsides diff_union_swap diff_zero insert_DiffM) lemma diff_diff_add_mset [simp]: "(M::'a multiset) - N - P = M - (N + P)" by (rule diff_diff_add) lemma diff_union_single_conv: "a \# J \ I + J - {#a#} = I + (J - {#a#})" by (simp add: multiset_eq_iff Suc_le_eq) lemma mset_add [elim?]: assumes "a \# A" obtains B where "A = add_mset a B" proof - from assms have "A = add_mset a (A - {#a#})" by simp with that show thesis . qed lemma union_iff: "a \# A + B \ a \# A \ a \# B" by auto subsubsection \Min and Max\ abbreviation Min_mset :: "'a::linorder multiset \ 'a" where "Min_mset m \ Min (set_mset m)" abbreviation Max_mset :: "'a::linorder multiset \ 'a" where "Max_mset m \ Max (set_mset m)" subsubsection \Equality of multisets\ lemma single_eq_single [simp]: "{#a#} = {#b#} \ a = b" by (auto simp add: multiset_eq_iff) lemma union_eq_empty [iff]: "M + N = {#} \ M = {#} \ N = {#}" by (auto simp add: multiset_eq_iff) lemma empty_eq_union [iff]: "{#} = M + N \ M = {#} \ N = {#}" by (auto simp add: multiset_eq_iff) lemma multi_self_add_other_not_self [simp]: "M = add_mset x M \ False" by (auto simp add: multiset_eq_iff) lemma add_mset_remove_trivial [simp]: \add_mset x M - {#x#} = M\ by (auto simp: multiset_eq_iff) lemma diff_single_trivial: "\ x \# M \ M - {#x#} = M" by (auto simp add: multiset_eq_iff not_in_iff) lemma diff_single_eq_union: "x \# M \ M - {#x#} = N \ M = add_mset x N" by auto lemma union_single_eq_diff: "add_mset x M = N \ M = N - {#x#}" unfolding add_mset_add_single[of _ M] by (fact add_implies_diff) lemma union_single_eq_member: "add_mset x M = N \ x \# N" by auto lemma add_mset_remove_trivial_If: "add_mset a (N - {#a#}) = (if a \# N then N else add_mset a N)" by (simp add: diff_single_trivial) lemma add_mset_remove_trivial_eq: \N = add_mset a (N - {#a#}) \ a \# N\ by (auto simp: add_mset_remove_trivial_If) lemma union_is_single: "M + N = {#a#} \ M = {#a#} \ N = {#} \ M = {#} \ N = {#a#}" (is "?lhs = ?rhs") proof show ?lhs if ?rhs using that by auto show ?rhs if ?lhs by (metis Multiset.diff_cancel add.commute add_diff_cancel_left' diff_add_zero diff_single_trivial insert_DiffM that) qed lemma single_is_union: "{#a#} = M + N \ {#a#} = M \ N = {#} \ M = {#} \ {#a#} = N" by (auto simp add: eq_commute [of "{#a#}" "M + N"] union_is_single) lemma add_eq_conv_diff: "add_mset a M = add_mset b N \ M = N \ a = b \ M = add_mset b (N - {#a#}) \ N = add_mset a 
(M - {#b#})" (is "?lhs \ ?rhs") (* shorter: by (simp add: multiset_eq_iff) fastforce *) proof show ?lhs if ?rhs using that by (auto simp add: add_mset_commute[of a b]) show ?rhs if ?lhs proof (cases "a = b") case True with \?lhs\ show ?thesis by simp next case False from \?lhs\ have "a \# add_mset b N" by (rule union_single_eq_member) with False have "a \# N" by auto moreover from \?lhs\ have "M = add_mset b N - {#a#}" by (rule union_single_eq_diff) moreover note False ultimately show ?thesis by (auto simp add: diff_right_commute [of _ "{#a#}"]) qed qed lemma add_mset_eq_single [iff]: "add_mset b M = {#a#} \ b = a \ M = {#}" by (auto simp: add_eq_conv_diff) lemma single_eq_add_mset [iff]: "{#a#} = add_mset b M \ b = a \ M = {#}" by (auto simp: add_eq_conv_diff) lemma insert_noteq_member: assumes BC: "add_mset b B = add_mset c C" and bnotc: "b \ c" shows "c \# B" proof - have "c \# add_mset c C" by simp have nc: "\ c \# {#b#}" using bnotc by simp then have "c \# add_mset b B" using BC by simp then show "c \# B" using nc by simp qed lemma add_eq_conv_ex: "(add_mset a M = add_mset b N) = (M = N \ a = b \ (\K. M = add_mset b K \ N = add_mset a K))" by (auto simp add: add_eq_conv_diff) lemma multi_member_split: "x \# M \ \A. M = add_mset x A" by (rule exI [where x = "M - {#x#}"]) simp lemma multiset_add_sub_el_shuffle: assumes "c \# B" and "b \ c" shows "add_mset b (B - {#c#}) = add_mset b B - {#c#}" proof - from \c \# B\ obtain A where B: "B = add_mset c A" by (blast dest: multi_member_split) have "add_mset b A = add_mset c (add_mset b A) - {#c#}" by simp then have "add_mset b A = add_mset b (add_mset c A) - {#c#}" by (simp add: \b \ c\) then show ?thesis using B by simp qed lemma add_mset_eq_singleton_iff[iff]: "add_mset x M = {#y#} \ M = {#} \ x = y" by auto subsubsection \Pointwise ordering induced by count\ definition subseteq_mset :: "'a multiset \ 'a multiset \ bool" (infix "\#" 50) where "A \# B \ (\a. count A a \ count B a)" definition subset_mset :: "'a multiset \ 'a multiset \ bool" (infix "\#" 50) where "A \# B \ A \# B \ A \ B" abbreviation (input) supseteq_mset :: "'a multiset \ 'a multiset \ bool" (infix "\#" 50) where "supseteq_mset A B \ B \# A" abbreviation (input) supset_mset :: "'a multiset \ 'a multiset \ bool" (infix "\#" 50) where "supset_mset A B \ B \# A" notation (input) subseteq_mset (infix "\#" 50) and supseteq_mset (infix "\#" 50) notation (ASCII) subseteq_mset (infix "<=#" 50) and subset_mset (infix "<#" 50) and supseteq_mset (infix ">=#" 50) and supset_mset (infix ">#" 50) global_interpretation subset_mset: ordering \(\#)\ \(\#)\ by standard (auto simp add: subset_mset_def subseteq_mset_def multiset_eq_iff intro: order.trans order.antisym) interpretation subset_mset: ordered_ab_semigroup_add_imp_le \(+)\ \(-)\ \(\#)\ \(\#)\ by standard (auto simp add: subset_mset_def subseteq_mset_def multiset_eq_iff intro: order_trans antisym) \ \FIXME: avoid junk stemming from type class interpretation\ interpretation subset_mset: ordered_ab_semigroup_monoid_add_imp_le "(+)" 0 "(-)" "(\#)" "(\#)" by standard \ \FIXME: avoid junk stemming from type class interpretation\ lemma mset_subset_eqI: "(\a. count A a \ count B a) \ A \# B" by (simp add: subseteq_mset_def) lemma mset_subset_eq_count: "A \# B \ count A a \ count B a" by (simp add: subseteq_mset_def) lemma mset_subset_eq_exists_conv: "(A::'a multiset) \# B \ (\C. 
B = A + C)" unfolding subseteq_mset_def apply (rule iffI) apply (rule exI [where x = "B - A"]) apply (auto intro: multiset_eq_iff [THEN iffD2]) done interpretation subset_mset: ordered_cancel_comm_monoid_diff "(+)" 0 "(\#)" "(\#)" "(-)" by standard (simp, fact mset_subset_eq_exists_conv) \ \FIXME: avoid junk stemming from type class interpretation\ declare subset_mset.add_diff_assoc[simp] subset_mset.add_diff_assoc2[simp] lemma mset_subset_eq_mono_add_right_cancel: "(A::'a multiset) + C \# B + C \ A \# B" by (fact subset_mset.add_le_cancel_right) lemma mset_subset_eq_mono_add_left_cancel: "C + (A::'a multiset) \# C + B \ A \# B" by (fact subset_mset.add_le_cancel_left) lemma mset_subset_eq_mono_add: "(A::'a multiset) \# B \ C \# D \ A + C \# B + D" by (fact subset_mset.add_mono) lemma mset_subset_eq_add_left: "(A::'a multiset) \# A + B" by simp lemma mset_subset_eq_add_right: "B \# (A::'a multiset) + B" by simp lemma single_subset_iff [simp]: "{#a#} \# M \ a \# M" by (auto simp add: subseteq_mset_def Suc_le_eq) lemma mset_subset_eq_single: "a \# B \ {#a#} \# B" by simp lemma mset_subset_eq_add_mset_cancel: \add_mset a A \# add_mset a B \ A \# B\ unfolding add_mset_add_single[of _ A] add_mset_add_single[of _ B] by (rule mset_subset_eq_mono_add_right_cancel) lemma multiset_diff_union_assoc: fixes A B C D :: "'a multiset" shows "C \# B \ A + B - C = A + (B - C)" by (fact subset_mset.diff_add_assoc) lemma mset_subset_eq_multiset_union_diff_commute: fixes A B C D :: "'a multiset" shows "B \# A \ A - B + C = A + C - B" by (fact subset_mset.add_diff_assoc2) lemma diff_subset_eq_self[simp]: "(M::'a multiset) - N \# M" by (simp add: subseteq_mset_def) lemma mset_subset_eqD: assumes "A \# B" and "x \# A" shows "x \# B" proof - from \x \# A\ have "count A x > 0" by simp also from \A \# B\ have "count A x \ count B x" by (simp add: subseteq_mset_def) finally show ?thesis by simp qed lemma mset_subsetD: "A \# B \ x \# A \ x \# B" by (auto intro: mset_subset_eqD [of A]) lemma set_mset_mono: "A \# B \ set_mset A \ set_mset B" by (metis mset_subset_eqD subsetI) lemma mset_subset_eq_insertD: "add_mset x A \# B \ x \# B \ A \# B" apply (rule conjI) apply (simp add: mset_subset_eqD) apply (clarsimp simp: subset_mset_def subseteq_mset_def) apply safe apply (erule_tac x = a in allE) apply (auto split: if_split_asm) done lemma mset_subset_insertD: "add_mset x A \# B \ x \# B \ A \# B" by (rule mset_subset_eq_insertD) simp lemma mset_subset_of_empty[simp]: "A \# {#} \ False" by (simp only: subset_mset.not_less_zero) lemma empty_subset_add_mset[simp]: "{#} \# add_mset x M" by (auto intro: subset_mset.gr_zeroI) lemma empty_le: "{#} \# A" by (fact subset_mset.zero_le) lemma insert_subset_eq_iff: "add_mset a A \# B \ a \# B \ A \# B - {#a#}" using le_diff_conv2 [of "Suc 0" "count B a" "count A a"] apply (auto simp add: subseteq_mset_def not_in_iff Suc_le_eq) apply (rule ccontr) apply (auto simp add: not_in_iff) done lemma insert_union_subset_iff: "add_mset a A \# B \ a \# B \ A \# B - {#a#}" by (auto simp add: insert_subset_eq_iff subset_mset_def) lemma subset_eq_diff_conv: "A - C \# B \ A \# B + C" by (simp add: subseteq_mset_def le_diff_conv) lemma multi_psub_of_add_self [simp]: "A \# add_mset x A" by (auto simp: subset_mset_def subseteq_mset_def) lemma multi_psub_self: "A \# A = False" by simp lemma mset_subset_add_mset [simp]: "add_mset x N \# add_mset x M \ N \# M" unfolding add_mset_add_single[of _ N] add_mset_add_single[of _ M] by (fact subset_mset.add_less_cancel_right) lemma mset_subset_diff_self: "c \# B \ 
B - {#c#} \# B" by (auto simp: subset_mset_def elim: mset_add) lemma Diff_eq_empty_iff_mset: "A - B = {#} \ A \# B" by (auto simp: multiset_eq_iff subseteq_mset_def) lemma add_mset_subseteq_single_iff[iff]: "add_mset a M \# {#b#} \ M = {#} \ a = b" proof assume A: "add_mset a M \# {#b#}" then have \a = b\ by (auto dest: mset_subset_eq_insertD) then show "M={#} \ a=b" using A by (simp add: mset_subset_eq_add_mset_cancel) qed simp subsubsection \Intersection and bounded union\ definition inter_mset :: \'a multiset \ 'a multiset \ 'a multiset\ (infixl \\#\ 70) where \A \# B = A - (A - B)\ lemma count_inter_mset [simp]: \count (A \# B) x = min (count A x) (count B x)\ by (simp add: inter_mset_def) (*global_interpretation subset_mset: semilattice_order \(\#)\ \(\#)\ \(\#)\ by standard (simp_all add: multiset_eq_iff subseteq_mset_def subset_mset_def min_def)*) interpretation subset_mset: semilattice_inf \(\#)\ \(\#)\ \(\#)\ by standard (simp_all add: multiset_eq_iff subseteq_mset_def) \ \FIXME: avoid junk stemming from type class interpretation\ definition union_mset :: \'a multiset \ 'a multiset \ 'a multiset\ (infixl \\#\ 70) where \A \# B = A + (B - A)\ lemma count_union_mset [simp]: \count (A \# B) x = max (count A x) (count B x)\ by (simp add: union_mset_def) global_interpretation subset_mset: semilattice_neutr_order \(\#)\ \{#}\ \(\#)\ \(\#)\ apply standard apply (simp_all add: multiset_eq_iff subseteq_mset_def subset_mset_def max_def) apply (auto simp add: le_antisym dest: sym) apply (metis nat_le_linear)+ done interpretation subset_mset: semilattice_sup \(\#)\ \(\#)\ \(\#)\ proof - have [simp]: "m \ n \ q \ n \ m + (q - m) \ n" for m n q :: nat by arith show "class.semilattice_sup (\#) (\#) (\#)" by standard (auto simp add: union_mset_def subseteq_mset_def) qed \ \FIXME: avoid junk stemming from type class interpretation\ interpretation subset_mset: bounded_lattice_bot "(\#)" "(\#)" "(\#)" "(\#)" "{#}" by standard auto \ \FIXME: avoid junk stemming from type class interpretation\ subsubsection \Additional intersection facts\ lemma set_mset_inter [simp]: "set_mset (A \# B) = set_mset A \ set_mset B" by (simp only: set_mset_def) auto lemma diff_intersect_left_idem [simp]: "M - M \# N = M - N" by (simp add: multiset_eq_iff min_def) lemma diff_intersect_right_idem [simp]: "M - N \# M = M - N" by (simp add: multiset_eq_iff min_def) lemma multiset_inter_single[simp]: "a \ b \ {#a#} \# {#b#} = {#}" by (rule multiset_eqI) auto lemma multiset_union_diff_commute: assumes "B \# C = {#}" shows "A + B - C = A - C + B" proof (rule multiset_eqI) fix x from assms have "min (count B x) (count C x) = 0" by (auto simp add: multiset_eq_iff) then have "count B x = 0 \ count C x = 0" unfolding min_def by (auto split: if_splits) then show "count (A + B - C) x = count (A - C + B) x" by auto qed lemma disjunct_not_in: "A \# B = {#} \ (\a. 
a \# A \ a \# B)" (is "?P \ ?Q") proof assume ?P show ?Q proof fix a from \?P\ have "min (count A a) (count B a) = 0" by (simp add: multiset_eq_iff) then have "count A a = 0 \ count B a = 0" by (cases "count A a \ count B a") (simp_all add: min_def) then show "a \# A \ a \# B" by (simp add: not_in_iff) qed next assume ?Q show ?P proof (rule multiset_eqI) fix a from \?Q\ have "count A a = 0 \ count B a = 0" by (auto simp add: not_in_iff) then show "count (A \# B) a = count {#} a" by auto qed qed lemma inter_mset_empty_distrib_right: "A \# (B + C) = {#} \ A \# B = {#} \ A \# C = {#}" by (meson disjunct_not_in union_iff) lemma inter_mset_empty_distrib_left: "(A + B) \# C = {#} \ A \# C = {#} \ B \# C = {#}" by (meson disjunct_not_in union_iff) lemma add_mset_inter_add_mset [simp]: "add_mset a A \# add_mset a B = add_mset a (A \# B)" by (rule multiset_eqI) simp lemma add_mset_disjoint [simp]: "add_mset a A \# B = {#} \ a \# B \ A \# B = {#}" "{#} = add_mset a A \# B \ a \# B \ {#} = A \# B" by (auto simp: disjunct_not_in) lemma disjoint_add_mset [simp]: "B \# add_mset a A = {#} \ a \# B \ B \# A = {#}" "{#} = A \# add_mset b B \ b \# A \ {#} = A \# B" by (auto simp: disjunct_not_in) lemma inter_add_left1: "\ x \# N \ (add_mset x M) \# N = M \# N" by (simp add: multiset_eq_iff not_in_iff) lemma inter_add_left2: "x \# N \ (add_mset x M) \# N = add_mset x (M \# (N - {#x#}))" by (auto simp add: multiset_eq_iff elim: mset_add) lemma inter_add_right1: "\ x \# N \ N \# (add_mset x M) = N \# M" by (simp add: multiset_eq_iff not_in_iff) lemma inter_add_right2: "x \# N \ N \# (add_mset x M) = add_mset x ((N - {#x#}) \# M)" by (auto simp add: multiset_eq_iff elim: mset_add) lemma disjunct_set_mset_diff: assumes "M \# N = {#}" shows "set_mset (M - N) = set_mset M" proof (rule set_eqI) fix a from assms have "a \# M \ a \# N" by (simp add: disjunct_not_in) then show "a \# M - N \ a \# M" by (auto dest: in_diffD) (simp add: in_diff_count not_in_iff) qed lemma at_most_one_mset_mset_diff: assumes "a \# M - {#a#}" shows "set_mset (M - {#a#}) = set_mset M - {a}" using assms by (auto simp add: not_in_iff in_diff_count set_eq_iff) lemma more_than_one_mset_mset_diff: assumes "a \# M - {#a#}" shows "set_mset (M - {#a#}) = set_mset M" proof (rule set_eqI) fix b have "Suc 0 < count M b \ count M b > 0" by arith then show "b \# M - {#a#} \ b \# M" using assms by (auto simp add: in_diff_count) qed lemma inter_iff: "a \# A \# B \ a \# A \ a \# B" by simp lemma inter_union_distrib_left: "A \# B + C = (A + C) \# (B + C)" by (simp add: multiset_eq_iff min_add_distrib_left) lemma inter_union_distrib_right: "C + A \# B = (C + A) \# (C + B)" using inter_union_distrib_left [of A B C] by (simp add: ac_simps) lemma inter_subset_eq_union: "A \# B \# A + B" by (auto simp add: subseteq_mset_def) subsubsection \Additional bounded union facts\ lemma set_mset_sup [simp]: \set_mset (A \# B) = set_mset A \ set_mset B\ by (simp only: set_mset_def) (auto simp add: less_max_iff_disj) lemma sup_union_left1 [simp]: "\ x \# N \ (add_mset x M) \# N = add_mset x (M \# N)" by (simp add: multiset_eq_iff not_in_iff) lemma sup_union_left2: "x \# N \ (add_mset x M) \# N = add_mset x (M \# (N - {#x#}))" by (simp add: multiset_eq_iff) lemma sup_union_right1 [simp]: "\ x \# N \ N \# (add_mset x M) = add_mset x (N \# M)" by (simp add: multiset_eq_iff not_in_iff) lemma sup_union_right2: "x \# N \ N \# (add_mset x M) = add_mset x ((N - {#x#}) \# M)" by (simp add: multiset_eq_iff) lemma sup_union_distrib_left: "A \# B + C = (A + C) \# (B + C)" by (simp add: 
multiset_eq_iff max_add_distrib_left) lemma union_sup_distrib_right: "C + A \# B = (C + A) \# (C + B)" using sup_union_distrib_left [of A B C] by (simp add: ac_simps) lemma union_diff_inter_eq_sup: "A + B - A \# B = A \# B" by (auto simp add: multiset_eq_iff) lemma union_diff_sup_eq_inter: "A + B - A \# B = A \# B" by (auto simp add: multiset_eq_iff) lemma add_mset_union: \add_mset a A \# add_mset a B = add_mset a (A \# B)\ by (auto simp: multiset_eq_iff max_def) subsection \Replicate and repeat operations\ definition replicate_mset :: "nat \ 'a \ 'a multiset" where "replicate_mset n x = (add_mset x ^^ n) {#}" lemma replicate_mset_0[simp]: "replicate_mset 0 x = {#}" unfolding replicate_mset_def by simp lemma replicate_mset_Suc [simp]: "replicate_mset (Suc n) x = add_mset x (replicate_mset n x)" unfolding replicate_mset_def by (induct n) (auto intro: add.commute) lemma count_replicate_mset[simp]: "count (replicate_mset n x) y = (if y = x then n else 0)" unfolding replicate_mset_def by (induct n) auto lift_definition repeat_mset :: \nat \ 'a multiset \ 'a multiset\ is \\n M a. n * M a\ by simp lemma count_repeat_mset [simp]: "count (repeat_mset i A) a = i * count A a" by transfer rule lemma repeat_mset_0 [simp]: \repeat_mset 0 M = {#}\ by transfer simp lemma repeat_mset_Suc [simp]: \repeat_mset (Suc n) M = M + repeat_mset n M\ by transfer simp lemma repeat_mset_right [simp]: "repeat_mset a (repeat_mset b A) = repeat_mset (a * b) A" by (auto simp: multiset_eq_iff left_diff_distrib') lemma left_diff_repeat_mset_distrib': \repeat_mset (i - j) u = repeat_mset i u - repeat_mset j u\ by (auto simp: multiset_eq_iff left_diff_distrib') lemma left_add_mult_distrib_mset: "repeat_mset i u + (repeat_mset j u + k) = repeat_mset (i+j) u + k" by (auto simp: multiset_eq_iff add_mult_distrib) lemma repeat_mset_distrib: "repeat_mset (m + n) A = repeat_mset m A + repeat_mset n A" by (auto simp: multiset_eq_iff Nat.add_mult_distrib) lemma repeat_mset_distrib2[simp]: "repeat_mset n (A + B) = repeat_mset n A + repeat_mset n B" by (auto simp: multiset_eq_iff add_mult_distrib2) lemma repeat_mset_replicate_mset[simp]: "repeat_mset n {#a#} = replicate_mset n a" by (auto simp: multiset_eq_iff) lemma repeat_mset_distrib_add_mset[simp]: "repeat_mset n (add_mset a A) = replicate_mset n a + repeat_mset n A" by (auto simp: multiset_eq_iff) lemma repeat_mset_empty[simp]: "repeat_mset n {#} = {#}" by transfer simp subsubsection \Simprocs\ lemma repeat_mset_iterate_add: \repeat_mset n M = iterate_add n M\ unfolding iterate_add_def by (induction n) auto lemma mset_subseteq_add_iff1: "j \ (i::nat) \ (repeat_mset i u + m \# repeat_mset j u + n) = (repeat_mset (i-j) u + m \# n)" by (auto simp add: subseteq_mset_def nat_le_add_iff1) lemma mset_subseteq_add_iff2: "i \ (j::nat) \ (repeat_mset i u + m \# repeat_mset j u + n) = (m \# repeat_mset (j-i) u + n)" by (auto simp add: subseteq_mset_def nat_le_add_iff2) lemma mset_subset_add_iff1: "j \ (i::nat) \ (repeat_mset i u + m \# repeat_mset j u + n) = (repeat_mset (i-j) u + m \# n)" unfolding subset_mset_def repeat_mset_iterate_add by (simp add: iterate_add_eq_add_iff1 mset_subseteq_add_iff1[unfolded repeat_mset_iterate_add]) lemma mset_subset_add_iff2: "i \ (j::nat) \ (repeat_mset i u + m \# repeat_mset j u + n) = (m \# repeat_mset (j-i) u + n)" unfolding subset_mset_def repeat_mset_iterate_add by (simp add: iterate_add_eq_add_iff2 mset_subseteq_add_iff2[unfolded repeat_mset_iterate_add]) ML_file \multiset_simprocs.ML\ lemma add_mset_replicate_mset_safe[cancelation_simproc_pre]: 
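(* The setup below feeds the cancellation simprocs: the
   [cancelation_simproc_pre] rules normalise multiset expressions towards
   sums, e.g. add_mset a M becomes {#a#} + M and repeat_mset becomes
   iterate_add; the simprocs then cancel summands common to both sides of
   =, ⊆#, ⊂# and -; the [cancelation_simproc_post] rules finally fold
   iterate_add back to repeat_mset. *)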
\NO_MATCH {#} M \ add_mset a M = {#a#} + M\ by simp declare repeat_mset_iterate_add[cancelation_simproc_pre] declare iterate_add_distrib[cancelation_simproc_pre] declare repeat_mset_iterate_add[symmetric, cancelation_simproc_post] declare add_mset_not_empty[cancelation_simproc_eq_elim] empty_not_add_mset[cancelation_simproc_eq_elim] subset_mset.le_zero_eq[cancelation_simproc_eq_elim] empty_not_add_mset[cancelation_simproc_eq_elim] add_mset_not_empty[cancelation_simproc_eq_elim] subset_mset.le_zero_eq[cancelation_simproc_eq_elim] le_zero_eq[cancelation_simproc_eq_elim] simproc_setup mseteq_cancel ("(l::'a multiset) + m = n" | "(l::'a multiset) = m + n" | "add_mset a m = n" | "m = add_mset a n" | "replicate_mset p a = n" | "m = replicate_mset p a" | "repeat_mset p m = n" | "m = repeat_mset p m") = \fn phi => Cancel_Simprocs.eq_cancel\ simproc_setup msetsubset_cancel ("(l::'a multiset) + m \# n" | "(l::'a multiset) \# m + n" | "add_mset a m \# n" | "m \# add_mset a n" | "replicate_mset p r \# n" | "m \# replicate_mset p r" | "repeat_mset p m \# n" | "m \# repeat_mset p m") = \fn phi => Multiset_Simprocs.subset_cancel_msets\ simproc_setup msetsubset_eq_cancel ("(l::'a multiset) + m \# n" | "(l::'a multiset) \# m + n" | "add_mset a m \# n" | "m \# add_mset a n" | "replicate_mset p r \# n" | "m \# replicate_mset p r" | "repeat_mset p m \# n" | "m \# repeat_mset p m") = \fn phi => Multiset_Simprocs.subseteq_cancel_msets\ simproc_setup msetdiff_cancel ("((l::'a multiset) + m) - n" | "(l::'a multiset) - (m + n)" | "add_mset a m - n" | "m - add_mset a n" | "replicate_mset p r - n" | "m - replicate_mset p r" | "repeat_mset p m - n" | "m - repeat_mset p m") = \fn phi => Cancel_Simprocs.diff_cancel\ subsubsection \Conditionally complete lattice\ instantiation multiset :: (type) Inf begin lift_definition Inf_multiset :: "'a multiset set \ 'a multiset" is "\A i. if A = {} then 0 else Inf ((\f. f i) ` A)" proof - fix A :: "('a \ nat) set" assume *: "\f. f \ A \ finite {x. 0 < f x}" show \finite {i. 0 < (if A = {} then 0 else INF f\A. f i)}\ proof (cases "A = {}") case False then obtain f where "f \ A" by blast hence "{i. Inf ((\f. f i) ` A) > 0} \ {i. f i > 0}" by (auto intro: less_le_trans[OF _ cInf_lower]) moreover from \f \ A\ * have "finite \" by simp ultimately have "finite {i. Inf ((\f. f i) ` A) > 0}" by (rule finite_subset) with False show ?thesis by simp qed simp_all qed instance .. end lemma Inf_multiset_empty: "Inf {} = {#}" by transfer simp_all lemma count_Inf_multiset_nonempty: "A \ {} \ count (Inf A) x = Inf ((\X. count X x) ` A)" by transfer simp_all instantiation multiset :: (type) Sup begin definition Sup_multiset :: "'a multiset set \ 'a multiset" where "Sup_multiset A = (if A \ {} \ subset_mset.bdd_above A then Abs_multiset (\i. Sup ((\X. count X i) ` A)) else {#})" lemma Sup_multiset_empty: "Sup {} = {#}" by (simp add: Sup_multiset_def) lemma Sup_multiset_unbounded: "\ subset_mset.bdd_above A \ Sup A = {#}" by (simp add: Sup_multiset_def) instance .. end lemma bdd_above_multiset_imp_bdd_above_count: assumes "subset_mset.bdd_above (A :: 'a multiset set)" shows "bdd_above ((\X. count X x) ` A)" proof - from assms obtain Y where Y: "\X\A. X \# Y" by (meson subset_mset.bdd_above.E) hence "count X x \ count Y x" if "X \ A" for X using that by (auto intro: mset_subset_eq_count) thus ?thesis by (intro bdd_aboveI[of _ "count Y x"]) auto qed lemma bdd_above_multiset_imp_finite_support: assumes "A \ {}" "subset_mset.bdd_above (A :: 'a multiset set)" shows "finite (\X\A. {x. 
count X x > 0})" proof - from assms obtain Y where Y: "\X\A. X \# Y" by (meson subset_mset.bdd_above.E) hence "count X x \ count Y x" if "X \ A" for X x using that by (auto intro: mset_subset_eq_count) hence "(\X\A. {x. count X x > 0}) \ {x. count Y x > 0}" by safe (erule less_le_trans) moreover have "finite \" by simp ultimately show ?thesis by (rule finite_subset) qed lemma Sup_multiset_in_multiset: \finite {i. 0 < (SUP M\A. count M i)}\ if \A \ {}\ \subset_mset.bdd_above A\ proof - have "{i. Sup ((\X. count X i) ` A) > 0} \ (\X\A. {i. 0 < count X i})" proof safe fix i assume pos: "(SUP X\A. count X i) > 0" show "i \ (\X\A. {i. 0 < count X i})" proof (rule ccontr) assume "i \ (\X\A. {i. 0 < count X i})" hence "\X\A. count X i \ 0" by (auto simp: count_eq_zero_iff) with that have "(SUP X\A. count X i) \ 0" by (intro cSup_least bdd_above_multiset_imp_bdd_above_count) auto with pos show False by simp qed qed moreover from that have "finite \" by (rule bdd_above_multiset_imp_finite_support) ultimately show "finite {i. Sup ((\X. count X i) ` A) > 0}" by (rule finite_subset) qed lemma count_Sup_multiset_nonempty: \count (Sup A) x = (SUP X\A. count X x)\ if \A \ {}\ \subset_mset.bdd_above A\ using that by (simp add: Sup_multiset_def Sup_multiset_in_multiset count_Abs_multiset) interpretation subset_mset: conditionally_complete_lattice Inf Sup "(\#)" "(\#)" "(\#)" "(\#)" proof fix X :: "'a multiset" and A assume "X \ A" show "Inf A \# X" proof (rule mset_subset_eqI) fix x from \X \ A\ have "A \ {}" by auto hence "count (Inf A) x = (INF X\A. count X x)" by (simp add: count_Inf_multiset_nonempty) also from \X \ A\ have "\ \ count X x" by (intro cInf_lower) simp_all finally show "count (Inf A) x \ count X x" . qed next fix X :: "'a multiset" and A assume nonempty: "A \ {}" and le: "\Y. Y \ A \ X \# Y" show "X \# Inf A" proof (rule mset_subset_eqI) fix x from nonempty have "count X x \ (INF X\A. count X x)" by (intro cInf_greatest) (auto intro: mset_subset_eq_count le) also from nonempty have "\ = count (Inf A) x" by (simp add: count_Inf_multiset_nonempty) finally show "count X x \ count (Inf A) x" . qed next fix X :: "'a multiset" and A assume X: "X \ A" and bdd: "subset_mset.bdd_above A" show "X \# Sup A" proof (rule mset_subset_eqI) fix x from X have "A \ {}" by auto have "count X x \ (SUP X\A. count X x)" by (intro cSUP_upper X bdd_above_multiset_imp_bdd_above_count bdd) also from count_Sup_multiset_nonempty[OF \A \ {}\ bdd] have "(SUP X\A. count X x) = count (Sup A) x" by simp finally show "count X x \ count (Sup A) x" . qed next fix X :: "'a multiset" and A assume nonempty: "A \ {}" and ge: "\Y. Y \ A \ Y \# X" from ge have bdd: "subset_mset.bdd_above A" by blast show "Sup A \# X" proof (rule mset_subset_eqI) fix x from count_Sup_multiset_nonempty[OF \A \ {}\ bdd] have "count (Sup A) x = (SUP X\A. count X x)" . also from nonempty have "\ \ count X x" by (intro cSup_least) (auto intro: mset_subset_eq_count ge) finally show "count (Sup A) x \ count X x" . qed qed \ \FIXME: avoid junk stemming from type class interpretation\ lemma set_mset_Inf: assumes "A \ {}" shows "set_mset (Inf A) = (\X\A. set_mset X)" proof safe fix x X assume "x \# Inf A" "X \ A" hence nonempty: "A \ {}" by (auto simp: Inf_multiset_empty) from \x \# Inf A\ have "{#x#} \# Inf A" by auto also from \X \ A\ have "\ \# X" by (rule subset_mset.cInf_lower) simp_all finally show "x \# X" by simp next fix x assume x: "x \ (\X\A. 
set_mset X)" hence "{#x#} \# X" if "X \ A" for X using that by auto from assms and this have "{#x#} \# Inf A" by (rule subset_mset.cInf_greatest) thus "x \# Inf A" by simp qed lemma in_Inf_multiset_iff: assumes "A \ {}" shows "x \# Inf A \ (\X\A. x \# X)" proof - from assms have "set_mset (Inf A) = (\X\A. set_mset X)" by (rule set_mset_Inf) also have "x \ \ \ (\X\A. x \# X)" by simp finally show ?thesis . qed lemma in_Inf_multisetD: "x \# Inf A \ X \ A \ x \# X" by (subst (asm) in_Inf_multiset_iff) auto lemma set_mset_Sup: assumes "subset_mset.bdd_above A" shows "set_mset (Sup A) = (\X\A. set_mset X)" proof safe fix x assume "x \# Sup A" hence nonempty: "A \ {}" by (auto simp: Sup_multiset_empty) show "x \ (\X\A. set_mset X)" proof (rule ccontr) assume x: "x \ (\X\A. set_mset X)" have "count X x \ count (Sup A) x" if "X \ A" for X x using that by (intro mset_subset_eq_count subset_mset.cSup_upper assms) with x have "X \# Sup A - {#x#}" if "X \ A" for X using that by (auto simp: subseteq_mset_def algebra_simps not_in_iff) hence "Sup A \# Sup A - {#x#}" by (intro subset_mset.cSup_least nonempty) with \x \# Sup A\ show False by (auto simp: subseteq_mset_def simp flip: count_greater_zero_iff dest!: spec[of _ x]) qed next fix x X assume "x \ set_mset X" "X \ A" hence "{#x#} \# X" by auto also have "X \# Sup A" by (intro subset_mset.cSup_upper \X \ A\ assms) finally show "x \ set_mset (Sup A)" by simp qed lemma in_Sup_multiset_iff: assumes "subset_mset.bdd_above A" shows "x \# Sup A \ (\X\A. x \# X)" proof - from assms have "set_mset (Sup A) = (\X\A. set_mset X)" by (rule set_mset_Sup) also have "x \ \ \ (\X\A. x \# X)" by simp finally show ?thesis . qed lemma in_Sup_multisetD: assumes "x \# Sup A" shows "\X\A. x \# X" proof - have "subset_mset.bdd_above A" by (rule ccontr) (insert assms, simp_all add: Sup_multiset_unbounded) with assms show ?thesis by (simp add: in_Sup_multiset_iff) qed interpretation subset_mset: distrib_lattice "(\#)" "(\#)" "(\#)" "(\#)" proof fix A B C :: "'a multiset" show "A \# (B \# C) = A \# B \# (A \# C)" by (intro multiset_eqI) simp_all qed \ \FIXME: avoid junk stemming from type class interpretation\ subsubsection \Filter (with comprehension syntax)\ text \Multiset comprehension\ lift_definition filter_mset :: "('a \ bool) \ 'a multiset \ 'a multiset" is "\P M. \x. if P x then M x else 0" by (rule filter_preserves_multiset) syntax (ASCII) "_MCollect" :: "pttrn \ 'a multiset \ bool \ 'a multiset" ("(1{#_ :# _./ _#})") syntax "_MCollect" :: "pttrn \ 'a multiset \ bool \ 'a multiset" ("(1{#_ \# _./ _#})") translations "{#x \# M. P#}" == "CONST filter_mset (\x. P) M" lemma count_filter_mset [simp]: "count (filter_mset P M) a = (if P a then count M a else 0)" by (simp add: filter_mset.rep_eq) lemma set_mset_filter [simp]: "set_mset (filter_mset P M) = {a \ set_mset M. 
P a}" by (simp only: set_eq_iff count_greater_zero_iff [symmetric] count_filter_mset) simp lemma filter_empty_mset [simp]: "filter_mset P {#} = {#}" by (rule multiset_eqI) simp lemma filter_single_mset: "filter_mset P {#x#} = (if P x then {#x#} else {#})" by (rule multiset_eqI) simp lemma filter_union_mset [simp]: "filter_mset P (M + N) = filter_mset P M + filter_mset P N" by (rule multiset_eqI) simp lemma filter_diff_mset [simp]: "filter_mset P (M - N) = filter_mset P M - filter_mset P N" by (rule multiset_eqI) simp lemma filter_inter_mset [simp]: "filter_mset P (M \# N) = filter_mset P M \# filter_mset P N" by (rule multiset_eqI) simp lemma filter_sup_mset[simp]: "filter_mset P (A \# B) = filter_mset P A \# filter_mset P B" by (rule multiset_eqI) simp lemma filter_mset_add_mset [simp]: "filter_mset P (add_mset x A) = (if P x then add_mset x (filter_mset P A) else filter_mset P A)" by (auto simp: multiset_eq_iff) lemma multiset_filter_subset[simp]: "filter_mset f M \# M" by (simp add: mset_subset_eqI) lemma multiset_filter_mono: assumes "A \# B" shows "filter_mset f A \# filter_mset f B" proof - from assms[unfolded mset_subset_eq_exists_conv] obtain C where B: "B = A + C" by auto show ?thesis unfolding B by auto qed lemma filter_mset_eq_conv: "filter_mset P M = N \ N \# M \ (\b\#N. P b) \ (\a\#M - N. \ P a)" (is "?P \ ?Q") proof assume ?P then show ?Q by auto (simp add: multiset_eq_iff in_diff_count) next assume ?Q then obtain Q where M: "M = N + Q" by (auto simp add: mset_subset_eq_exists_conv) then have MN: "M - N = Q" by simp show ?P proof (rule multiset_eqI) fix a from \?Q\ MN have *: "\ P a \ a \# N" "P a \ a \# Q" by auto show "count (filter_mset P M) a = count N a" proof (cases "a \# M") case True with * show ?thesis by (simp add: not_in_iff M) next case False then have "count M a = 0" by (simp add: not_in_iff) with M show ?thesis by simp qed qed qed lemma filter_filter_mset: "filter_mset P (filter_mset Q M) = {#x \# M. Q x \ P x#}" by (auto simp: multiset_eq_iff) lemma filter_mset_True[simp]: "{#y \# M. True#} = M" and filter_mset_False[simp]: "{#y \# M. False#} = {#}" by (auto simp: multiset_eq_iff) subsubsection \Size\ definition wcount where "wcount f M = (\x. count M x * Suc (f x))" lemma wcount_union: "wcount f (M + N) a = wcount f M a + wcount f N a" by (auto simp: wcount_def add_mult_distrib) lemma wcount_add_mset: "wcount f (add_mset x M) a = (if x = a then Suc (f a) else 0) + wcount f M a" unfolding add_mset_add_single[of _ M] wcount_union by (auto simp: wcount_def) definition size_multiset :: "('a \ nat) \ 'a multiset \ nat" where "size_multiset f M = sum (wcount f M) (set_mset M)" lemmas size_multiset_eq = size_multiset_def[unfolded wcount_def] instantiation multiset :: (type) size begin definition size_multiset where size_multiset_overloaded_def: "size_multiset = Multiset.size_multiset (\_. 0)" instance .. 
end lemmas size_multiset_overloaded_eq = size_multiset_overloaded_def[THEN fun_cong, unfolded size_multiset_eq, simplified] lemma size_multiset_empty [simp]: "size_multiset f {#} = 0" by (simp add: size_multiset_def) lemma size_empty [simp]: "size {#} = 0" by (simp add: size_multiset_overloaded_def) lemma size_multiset_single : "size_multiset f {#b#} = Suc (f b)" by (simp add: size_multiset_eq) lemma size_single: "size {#b#} = 1" by (simp add: size_multiset_overloaded_def size_multiset_single) lemma sum_wcount_Int: "finite A \ sum (wcount f N) (A \ set_mset N) = sum (wcount f N) A" by (induct rule: finite_induct) (simp_all add: Int_insert_left wcount_def count_eq_zero_iff) lemma size_multiset_union [simp]: "size_multiset f (M + N::'a multiset) = size_multiset f M + size_multiset f N" apply (simp add: size_multiset_def sum_Un_nat sum.distrib sum_wcount_Int wcount_union) apply (subst Int_commute) apply (simp add: sum_wcount_Int) done lemma size_multiset_add_mset [simp]: "size_multiset f (add_mset a M) = Suc (f a) + size_multiset f M" unfolding add_mset_add_single[of _ M] size_multiset_union by (auto simp: size_multiset_single) lemma size_add_mset [simp]: "size (add_mset a A) = Suc (size A)" by (simp add: size_multiset_overloaded_def wcount_add_mset) lemma size_union [simp]: "size (M + N::'a multiset) = size M + size N" by (auto simp add: size_multiset_overloaded_def) lemma size_multiset_eq_0_iff_empty [iff]: "size_multiset f M = 0 \ M = {#}" by (auto simp add: size_multiset_eq count_eq_zero_iff) lemma size_eq_0_iff_empty [iff]: "(size M = 0) = (M = {#})" by (auto simp add: size_multiset_overloaded_def) lemma nonempty_has_size: "(S \ {#}) = (0 < size S)" by (metis gr0I gr_implies_not0 size_empty size_eq_0_iff_empty) lemma size_eq_Suc_imp_elem: "size M = Suc n \ \a. a \# M" apply (unfold size_multiset_overloaded_eq) apply (drule sum_SucD) apply auto done lemma size_eq_Suc_imp_eq_union: assumes "size M = Suc n" shows "\a N. M = add_mset a N" proof - from assms obtain a where "a \# M" by (erule size_eq_Suc_imp_elem [THEN exE]) then have "M = add_mset a (M - {#a#})" by simp then show ?thesis by blast qed lemma size_mset_mono: fixes A B :: "'a multiset" assumes "A \# B" shows "size A \ size B" proof - from assms[unfolded mset_subset_eq_exists_conv] obtain C where B: "B = A + C" by auto show ?thesis unfolding B by (induct C) auto qed lemma size_filter_mset_lesseq[simp]: "size (filter_mset f M) \ size M" by (rule size_mset_mono[OF multiset_filter_subset]) lemma size_Diff_submset: "M \# M' \ size (M' - M) = size M' - size(M::'a multiset)" by (metis add_diff_cancel_left' size_union mset_subset_eq_exists_conv) subsection \Induction and case splits\ theorem multiset_induct [case_names empty add, induct type: multiset]: assumes empty: "P {#}" assumes add: "\x M. P M \ P (add_mset x M)" shows "P M" proof (induct "size M" arbitrary: M) case 0 thus "P M" by (simp add: empty) next case (Suc k) obtain N x where "M = add_mset x N" using \Suc k = size M\ [symmetric] using size_eq_Suc_imp_eq_union by fast with Suc add show "P M" by simp qed lemma multiset_induct_min[case_names empty add]: fixes M :: "'a::linorder multiset" assumes empty: "P {#}" and add: "\x M. P M \ (\y \# M. 
y \ x) \ P (add_mset x M)" shows "P M" proof (induct "size M" arbitrary: M) case (Suc k) note ih = this(1) and Sk_eq_sz_M = this(2) let ?y = "Min_mset M" let ?N = "M - {#?y#}" have M: "M = add_mset ?y ?N" by (metis Min_in Sk_eq_sz_M finite_set_mset insert_DiffM lessI not_less_zero set_mset_eq_empty_iff size_empty) show ?case by (subst M, rule add, rule ih, metis M Sk_eq_sz_M nat.inject size_add_mset, meson Min_le finite_set_mset in_diffD) qed (simp add: empty) lemma multiset_induct_max[case_names empty add]: fixes M :: "'a::linorder multiset" assumes empty: "P {#}" and add: "\x M. P M \ (\y \# M. y \ x) \ P (add_mset x M)" shows "P M" proof (induct "size M" arbitrary: M) case (Suc k) note ih = this(1) and Sk_eq_sz_M = this(2) let ?y = "Max_mset M" let ?N = "M - {#?y#}" have M: "M = add_mset ?y ?N" by (metis Max_in Sk_eq_sz_M finite_set_mset insert_DiffM lessI not_less_zero set_mset_eq_empty_iff size_empty) show ?case by (subst M, rule add, rule ih, metis M Sk_eq_sz_M nat.inject size_add_mset, meson Max_ge finite_set_mset in_diffD) qed (simp add: empty) lemma multi_nonempty_split: "M \ {#} \ \A a. M = add_mset a A" by (induct M) auto lemma multiset_cases [cases type]: obtains (empty) "M = {#}" | (add) x N where "M = add_mset x N" by (induct M) simp_all lemma multi_drop_mem_not_eq: "c \# B \ B - {#c#} \ B" by (cases "B = {#}") (auto dest: multi_member_split) lemma union_filter_mset_complement[simp]: "\x. P x = (\ Q x) \ filter_mset P M + filter_mset Q M = M" by (subst multiset_eq_iff) auto lemma multiset_partition: "M = {#x \# M. P x#} + {#x \# M. \ P x#}" by simp lemma mset_subset_size: "A \# B \ size A < size B" proof (induct A arbitrary: B) case empty then show ?case using nonempty_has_size by auto next case (add x A) have "add_mset x A \# B" by (meson add.prems subset_mset_def) then show ?case by (metis (no_types) add.prems add.right_neutral add_diff_cancel_left' leD nat_neq_iff size_Diff_submset size_eq_0_iff_empty size_mset_mono subset_mset.le_iff_add subset_mset_def) qed lemma size_1_singleton_mset: "size M = 1 \ \a. M = {#a#}" by (cases M) auto subsubsection \Strong induction and subset induction for multisets\ text \Well-foundedness of strict subset relation\ lemma wf_subset_mset_rel: "wf {(M, N :: 'a multiset). M \# N}" apply (rule wf_measure [THEN wf_subset, where f1=size]) apply (clarsimp simp: measure_def inv_image_def mset_subset_size) done lemma full_multiset_induct [case_names less]: assumes ih: "\B. \(A::'a multiset). A \# B \ P A \ P B" shows "P B" apply (rule wf_subset_mset_rel [THEN wf_induct]) apply (rule ih, auto) done lemma multi_subset_induct [consumes 2, case_names empty add]: assumes "F \# A" and empty: "P {#}" and insert: "\a F. a \# A \ P F \ P (add_mset a F)" shows "P F" proof - from \F \# A\ show ?thesis proof (induct F) show "P {#}" by fact next fix x F assume P: "F \# A \ P F" and i: "add_mset x F \# A" show "P (add_mset x F)" proof (rule insert) from i show "x \# A" by (auto dest: mset_subset_eq_insertD) from i have "F \# A" by (auto dest: mset_subset_eq_insertD) with P show "P F" . qed qed qed subsection \The fold combinator\ definition fold_mset :: "('a \ 'b \ 'b) \ 'b \ 'a multiset \ 'b" where "fold_mset f s M = Finite_Set.fold (\x. f x ^^ count M x) s (set_mset M)" lemma fold_mset_empty [simp]: "fold_mset f s {#} = s" by (simp add: fold_mset_def) context comp_fun_commute begin lemma fold_mset_add_mset [simp]: "fold_mset f s (add_mset x M) = f x (fold_mset f s M)" proof - interpret mset: comp_fun_commute "\y. 
f y ^^ count M y" by (fact comp_fun_commute_funpow) interpret mset_union: comp_fun_commute "\y. f y ^^ count (add_mset x M) y" by (fact comp_fun_commute_funpow) show ?thesis proof (cases "x \ set_mset M") case False then have *: "count (add_mset x M) x = 1" by (simp add: not_in_iff) from False have "Finite_Set.fold (\y. f y ^^ count (add_mset x M) y) s (set_mset M) = Finite_Set.fold (\y. f y ^^ count M y) s (set_mset M)" by (auto intro!: Finite_Set.fold_cong comp_fun_commute_on_funpow) with False * show ?thesis by (simp add: fold_mset_def del: count_add_mset) next case True define N where "N = set_mset M - {x}" from N_def True have *: "set_mset M = insert x N" "x \ N" "finite N" by auto then have "Finite_Set.fold (\y. f y ^^ count (add_mset x M) y) s N = Finite_Set.fold (\y. f y ^^ count M y) s N" by (auto intro!: Finite_Set.fold_cong comp_fun_commute_on_funpow) with * show ?thesis by (simp add: fold_mset_def del: count_add_mset) simp qed qed corollary fold_mset_single: "fold_mset f s {#x#} = f x s" by simp lemma fold_mset_fun_left_comm: "f x (fold_mset f s M) = fold_mset f (f x s) M" by (induct M) (simp_all add: fun_left_comm) lemma fold_mset_union [simp]: "fold_mset f s (M + N) = fold_mset f (fold_mset f s M) N" by (induct M) (simp_all add: fold_mset_fun_left_comm) lemma fold_mset_fusion: assumes "comp_fun_commute g" and *: "\x y. h (g x y) = f x (h y)" shows "h (fold_mset g w A) = fold_mset f (h w) A" proof - interpret comp_fun_commute g by (fact assms) from * show ?thesis by (induct A) auto qed end lemma union_fold_mset_add_mset: "A + B = fold_mset add_mset A B" proof - interpret comp_fun_commute add_mset by standard auto show ?thesis by (induction B) auto qed text \ A note on code generation: When defining some function containing a subterm \<^term>\fold_mset F\, code generation is not automatic. When interpreting locale \left_commutative\ with \F\, the would be code thms for \<^const>\fold_mset\ become thms like \<^term>\fold_mset F z {#} = z\ where \F\ is not a pattern but contains defined symbols, i.e.\ is not a code thm. Hence a separate constant with its own code thms needs to be introduced for \F\. See the image operator below. \ subsection \Image\ definition image_mset :: "('a \ 'b) \ 'a multiset \ 'b multiset" where "image_mset f = fold_mset (add_mset \ f) {#}" lemma comp_fun_commute_mset_image: "comp_fun_commute (add_mset \ f)" by unfold_locales (simp add: fun_eq_iff) lemma image_mset_empty [simp]: "image_mset f {#} = {#}" by (simp add: image_mset_def) lemma image_mset_single: "image_mset f {#x#} = {#f x#}" by (simp add: comp_fun_commute.fold_mset_add_mset comp_fun_commute_mset_image image_mset_def) lemma image_mset_union [simp]: "image_mset f (M + N) = image_mset f M + image_mset f N" proof - interpret comp_fun_commute "add_mset \ f" by (fact comp_fun_commute_mset_image) show ?thesis by (induct N) (simp_all add: image_mset_def) qed corollary image_mset_add_mset [simp]: "image_mset f (add_mset a M) = add_mset (f a) (image_mset f M)" unfolding image_mset_union add_mset_add_single[of a M] by (simp add: image_mset_single) lemma set_image_mset [simp]: "set_mset (image_mset f M) = image f (set_mset M)" by (induct M) simp_all lemma size_image_mset [simp]: "size (image_mset f M) = size M" by (induct M) simp_all lemma image_mset_is_empty_iff [simp]: "image_mset f M = {#} \ M = {#}" by (cases M) auto lemma image_mset_If: "image_mset (\x. if P x then f x else g x) A = image_mset f (filter_mset P A) + image_mset g (filter_mset (\x. 
\P x) A)" by (induction A) auto lemma image_mset_Diff: assumes "B \# A" shows "image_mset f (A - B) = image_mset f A - image_mset f B" proof - have "image_mset f (A - B + B) = image_mset f (A - B) + image_mset f B" by simp also from assms have "A - B + B = A" by (simp add: subset_mset.diff_add) finally show ?thesis by simp qed lemma count_image_mset: \count (image_mset f A) x = (\y\f -` {x} \ set_mset A. count A y)\ proof (induction A) case empty then show ?case by simp next case (add x A) moreover have *: "(if x = y then Suc n else n) = n + (if x = y then 1 else 0)" for n y by simp ultimately show ?case by (auto simp: sum.distrib intro!: sum.mono_neutral_left) qed lemma count_image_mset': \count (image_mset f X) y = (\x | x \# X \ y = f x. count X x)\ by (auto simp add: count_image_mset simp flip: singleton_conv2 simp add: Collect_conj_eq ac_simps) lemma image_mset_subseteq_mono: "A \# B \ image_mset f A \# image_mset f B" by (metis image_mset_union subset_mset.le_iff_add) lemma image_mset_subset_mono: "M \# N \ image_mset f M \# image_mset f N" by (metis (no_types) Diff_eq_empty_iff_mset image_mset_Diff image_mset_is_empty_iff image_mset_subseteq_mono subset_mset.less_le_not_le) syntax (ASCII) "_comprehension_mset" :: "'a \ 'b \ 'b multiset \ 'a multiset" ("({#_/. _ :# _#})") syntax "_comprehension_mset" :: "'a \ 'b \ 'b multiset \ 'a multiset" ("({#_/. _ \# _#})") translations "{#e. x \# M#}" \ "CONST image_mset (\x. e) M" syntax (ASCII) "_comprehension_mset'" :: "'a \ 'b \ 'b multiset \ bool \ 'a multiset" ("({#_/ | _ :# _./ _#})") syntax "_comprehension_mset'" :: "'a \ 'b \ 'b multiset \ bool \ 'a multiset" ("({#_/ | _ \# _./ _#})") translations "{#e | x\#M. P#}" \ "{#e. x \# {# x\#M. P#}#}" text \ This allows to write not just filters like \<^term>\{#x\#M. x but also images like \<^term>\{#x+x. x\#M #}\ and @{term [source] "{#x+x|x\#M. x\{#x+x|x\#M. x. \ lemma in_image_mset: "y \# {#f x. x \# M#} \ y \ f ` set_mset M" by simp functor image_mset: image_mset proof - fix f g show "image_mset f \ image_mset g = image_mset (f \ g)" proof fix A show "(image_mset f \ image_mset g) A = image_mset (f \ g) A" by (induct A) simp_all qed show "image_mset id = id" proof fix A show "image_mset id A = id A" by (induct A) simp_all qed qed declare image_mset.id [simp] image_mset.identity [simp] lemma image_mset_id[simp]: "image_mset id x = x" unfolding id_def by auto lemma image_mset_cong: "(\x. x \# M \ f x = g x) \ {#f x. x \# M#} = {#g x. x \# M#}" by (induct M) auto lemma image_mset_cong_pair: "(\x y. (x, y) \# M \ f x y = g x y) \ {#f x y. (x, y) \# M#} = {#g x y. (x, y) \# M#}" by (metis image_mset_cong split_cong) lemma image_mset_const_eq: "{#c. a \# M#} = replicate_mset (size M) c" by (induct M) simp_all subsection \Further conversions\ primrec mset :: "'a list \ 'a multiset" where "mset [] = {#}" | "mset (a # x) = add_mset a (mset x)" lemma in_multiset_in_set: "x \# mset xs \ x \ set xs" by (induct xs) simp_all lemma count_mset: "count (mset xs) x = length (filter (\y. 
x = y) xs)" by (induct xs) simp_all lemma mset_zero_iff[simp]: "(mset x = {#}) = (x = [])" by (induct x) auto lemma mset_zero_iff_right[simp]: "({#} = mset x) = (x = [])" by (induct x) auto lemma count_mset_gt_0: "x \ set xs \ count (mset xs) x > 0" by (induction xs) auto lemma count_mset_0_iff [simp]: "count (mset xs) x = 0 \ x \ set xs" by (induction xs) auto lemma mset_single_iff[iff]: "mset xs = {#x#} \ xs = [x]" by (cases xs) auto lemma mset_single_iff_right[iff]: "{#x#} = mset xs \ xs = [x]" by (cases xs) auto lemma set_mset_mset[simp]: "set_mset (mset xs) = set xs" by (induct xs) auto lemma set_mset_comp_mset [simp]: "set_mset \ mset = set" by (simp add: fun_eq_iff) lemma size_mset [simp]: "size (mset xs) = length xs" by (induct xs) simp_all lemma mset_append [simp]: "mset (xs @ ys) = mset xs + mset ys" by (induct xs arbitrary: ys) auto lemma mset_filter[simp]: "mset (filter P xs) = {#x \# mset xs. P x #}" by (induct xs) simp_all lemma mset_rev [simp]: "mset (rev xs) = mset xs" by (induct xs) simp_all lemma surj_mset: "surj mset" apply (unfold surj_def) apply (rule allI) apply (rule_tac M = y in multiset_induct) apply auto apply (rule_tac x = "x # xa" in exI) apply auto done lemma distinct_count_atmost_1: "distinct x = (\a. count (mset x) a = (if a \ set x then 1 else 0))" proof (induct x) case Nil then show ?case by simp next case (Cons x xs) show ?case (is "?lhs \ ?rhs") proof assume ?lhs then show ?rhs using Cons by simp next assume ?rhs then have "x \ set xs" by (simp split: if_splits) moreover from \?rhs\ have "(\a. count (mset xs) a = (if a \ set xs then 1 else 0))" by (auto split: if_splits simp add: count_eq_zero_iff) ultimately show ?lhs using Cons by simp qed qed lemma mset_eq_setD: assumes "mset xs = mset ys" shows "set xs = set ys" proof - from assms have "set_mset (mset xs) = set_mset (mset ys)" by simp then show ?thesis by simp qed lemma set_eq_iff_mset_eq_distinct: \distinct x \ distinct y \ set x = set y \ mset x = mset y\ by (auto simp: multiset_eq_iff distinct_count_atmost_1) lemma set_eq_iff_mset_remdups_eq: \set x = set y \ mset (remdups x) = mset (remdups y)\ apply (rule iffI) apply (simp add: set_eq_iff_mset_eq_distinct[THEN iffD1]) apply (drule distinct_remdups [THEN distinct_remdups [THEN set_eq_iff_mset_eq_distinct [THEN iffD2]]]) apply simp done lemma mset_eq_imp_distinct_iff: \distinct xs \ distinct ys\ if \mset xs = mset ys\ using that by (auto simp add: distinct_count_atmost_1 dest: mset_eq_setD) lemma nth_mem_mset: "i < length ls \ (ls ! i) \# mset ls" proof (induct ls arbitrary: i) case Nil then show ?case by simp next case Cons then show ?case by (cases i) auto qed lemma mset_remove1[simp]: "mset (remove1 a xs) = mset xs - {#a#}" by (induct xs) (auto simp add: multiset_eq_iff) lemma mset_eq_length: assumes "mset xs = mset ys" shows "length xs = length ys" using assms by (metis size_mset) lemma mset_eq_length_filter: assumes "mset xs = mset ys" shows "length (filter (\x. z = x) xs) = length (filter (\y. z = y) ys)" using assms by (metis count_mset) lemma fold_multiset_equiv: \List.fold f xs = List.fold f ys\ if f: \\x y. x \ set xs \ y \ set xs \ f x \ f y = f y \ f x\ and \mset xs = mset ys\ using f \mset xs = mset ys\ [symmetric] proof (induction xs arbitrary: ys) case Nil then show ?case by simp next case (Cons x xs) then have *: \set ys = set (x # xs)\ by (blast dest: mset_eq_setD) have \\x y. 
x \ set ys \ y \ set ys \ f x \ f y = f y \ f x\ by (rule Cons.prems(1)) (simp_all add: *) moreover from * have \x \ set ys\ by simp ultimately have \List.fold f ys = List.fold f (remove1 x ys) \ f x\ by (fact fold_remove1_split) moreover from Cons.prems have \List.fold f xs = List.fold f (remove1 x ys)\ by (auto intro: Cons.IH) ultimately show ?case by simp qed lemma fold_permuted_eq: \List.fold (\) xs z = List.fold (\) ys z\ if \mset xs = mset ys\ and \P z\ and P: \\x z. x \ set xs \ P z \ P (x \ z)\ and f: \\x y z. x \ set xs \ y \ set xs \ P z \ x \ (y \ z) = y \ (x \ z)\ for f (infixl \\\ 70) using \P z\ P f \mset xs = mset ys\ [symmetric] proof (induction xs arbitrary: ys z) case Nil then show ?case by simp next case (Cons x xs) then have *: \set ys = set (x # xs)\ by (blast dest: mset_eq_setD) have \P z\ by (fact Cons.prems(1)) moreover have \\x z. x \ set ys \ P z \ P (x \ z)\ by (rule Cons.prems(2)) (simp_all add: *) moreover have \\x y z. x \ set ys \ y \ set ys \ P z \ x \ (y \ z) = y \ (x \ z)\ by (rule Cons.prems(3)) (simp_all add: *) moreover from * have \x \ set ys\ by simp ultimately have \fold (\) ys z = fold (\) (remove1 x ys) (x \ z)\ by (induction ys arbitrary: z) auto moreover from Cons.prems have \fold (\) xs (x \ z) = fold (\) (remove1 x ys) (x \ z)\ by (auto intro: Cons.IH) ultimately show ?case by simp qed lemma mset_shuffles: "zs \ shuffles xs ys \ mset zs = mset xs + mset ys" by (induction xs ys arbitrary: zs rule: shuffles.induct) auto lemma mset_insort [simp]: "mset (insort x xs) = add_mset x (mset xs)" by (induct xs) simp_all lemma mset_map[simp]: "mset (map f xs) = image_mset f (mset xs)" by (induct xs) simp_all global_interpretation mset_set: folding add_mset "{#}" defines mset_set = "folding_on.F add_mset {#}" by standard (simp add: fun_eq_iff) lemma sum_multiset_singleton [simp]: "sum (\n. {#n#}) A = mset_set A" by (induction A rule: infinite_finite_induct) auto lemma count_mset_set [simp]: "finite A \ x \ A \ count (mset_set A) x = 1" (is "PROP ?P") "\ finite A \ count (mset_set A) x = 0" (is "PROP ?Q") "x \ A \ count (mset_set A) x = 0" (is "PROP ?R") proof - have *: "count (mset_set A) x = 0" if "x \ A" for A proof (cases "finite A") case False then show ?thesis by simp next case True from True \x \ A\ show ?thesis by (induct A) auto qed then show "PROP ?P" "PROP ?Q" "PROP ?R" by (auto elim!: Set.set_insert) qed \ \TODO: maybe define \<^const>\mset_set\ also in terms of \<^const>\Abs_multiset\\ lemma elem_mset_set[simp, intro]: "finite A \ x \# mset_set A \ x \ A" by (induct A rule: finite_induct) simp_all lemma mset_set_Union: "finite A \ finite B \ A \ B = {} \ mset_set (A \ B) = mset_set A + mset_set B" by (induction A rule: finite_induct) auto lemma filter_mset_mset_set [simp]: "finite A \ filter_mset P (mset_set A) = mset_set {x\A. P x}" proof (induction A rule: finite_induct) case (insert x A) from insert.hyps have "filter_mset P (mset_set (insert x A)) = filter_mset P (mset_set A) + mset_set (if P x then {x} else {})" by simp also have "filter_mset P (mset_set A) = mset_set {x\A. P x}" by (rule insert.IH) also from insert.hyps have "\ + mset_set (if P x then {x} else {}) = mset_set ({x \ A. P x} \ (if P x then {x} else {}))" (is "_ = mset_set ?A") by (intro mset_set_Union [symmetric]) simp_all also from insert.hyps have "?A = {y\insert x A. P y}" by auto finally show ?case . 
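(* Illustrative sanity checks, an aside not taken from the library
   development: mset forgets the order of a list but keeps multiplicities,
   so permuted lists yield equal multisets, while remdups collapses all
   multiplicities to one. *)
lemma "mset [a, b] = mset [b, a]"
  by (simp add: add_mset_commute)

lemma "mset (remdups [a, a]) = {#a#}"
  by simp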
qed simp_all lemma mset_set_Diff: assumes "finite A" "B \ A" shows "mset_set (A - B) = mset_set A - mset_set B" proof - from assms have "mset_set ((A - B) \ B) = mset_set (A - B) + mset_set B" by (intro mset_set_Union) (auto dest: finite_subset) also from assms have "A - B \ B = A" by blast finally show ?thesis by simp qed lemma mset_set_set: "distinct xs \ mset_set (set xs) = mset xs" by (induction xs) simp_all lemma count_mset_set': "count (mset_set A) x = (if finite A \ x \ A then 1 else 0)" by auto lemma subset_imp_msubset_mset_set: assumes "A \ B" "finite B" shows "mset_set A \# mset_set B" proof (rule mset_subset_eqI) fix x :: 'a from assms have "finite A" by (rule finite_subset) with assms show "count (mset_set A) x \ count (mset_set B) x" by (cases "x \ A"; cases "x \ B") auto qed lemma mset_set_set_mset_msubset: "mset_set (set_mset A) \# A" proof (rule mset_subset_eqI) fix x show "count (mset_set (set_mset A)) x \ count A x" by (cases "x \# A") simp_all qed lemma mset_set_upto_eq_mset_upto: \mset_set {.. by (induction n) (auto simp: ac_simps lessThan_Suc) context linorder begin definition sorted_list_of_multiset :: "'a multiset \ 'a list" where "sorted_list_of_multiset M = fold_mset insort [] M" lemma sorted_list_of_multiset_empty [simp]: "sorted_list_of_multiset {#} = []" by (simp add: sorted_list_of_multiset_def) lemma sorted_list_of_multiset_singleton [simp]: "sorted_list_of_multiset {#x#} = [x]" proof - interpret comp_fun_commute insort by (fact comp_fun_commute_insort) show ?thesis by (simp add: sorted_list_of_multiset_def) qed lemma sorted_list_of_multiset_insert [simp]: "sorted_list_of_multiset (add_mset x M) = List.insort x (sorted_list_of_multiset M)" proof - interpret comp_fun_commute insort by (fact comp_fun_commute_insort) show ?thesis by (simp add: sorted_list_of_multiset_def) qed end lemma mset_sorted_list_of_multiset[simp]: "mset (sorted_list_of_multiset M) = M" by (induct M) simp_all lemma sorted_list_of_multiset_mset[simp]: "sorted_list_of_multiset (mset xs) = sort xs" by (induct xs) simp_all lemma finite_set_mset_mset_set[simp]: "finite A \ set_mset (mset_set A) = A" by auto lemma mset_set_empty_iff: "mset_set A = {#} \ A = {} \ infinite A" using finite_set_mset_mset_set by fastforce lemma infinite_set_mset_mset_set: "\ finite A \ set_mset (mset_set A) = {}" by simp lemma set_sorted_list_of_multiset [simp]: "set (sorted_list_of_multiset M) = set_mset M" by (induct M) (simp_all add: set_insort_key) lemma sorted_list_of_mset_set [simp]: "sorted_list_of_multiset (mset_set A) = sorted_list_of_set A" by (cases "finite A") (induct A rule: finite_induct, simp_all) lemma mset_upt [simp]: "mset [m.. {#the (map_of xs i). i \# mset (map fst xs)#} = mset (map snd xs)" proof (induction xs) case (Cons x xs) have "{#the (map_of (x # xs) i). i \# mset (map fst (x # xs))#} = add_mset (snd x) {#the (if i = fst x then Some (snd x) else map_of xs i). i \# mset (map fst xs)#}" (is "_ = add_mset _ ?A") by simp also from Cons.prems have "?A = {#the (map_of xs i). 
i :# mset (map fst xs)#}" by (cases x, intro image_mset_cong) (auto simp: in_multiset_in_set) also from Cons.prems have "\ = mset (map snd xs)" by (intro Cons.IH) simp_all finally show ?case by simp qed simp_all lemma msubset_mset_set_iff[simp]: assumes "finite A" "finite B" shows "mset_set A \# mset_set B \ A \ B" using assms set_mset_mono subset_imp_msubset_mset_set by fastforce lemma mset_set_eq_iff[simp]: assumes "finite A" "finite B" shows "mset_set A = mset_set B \ A = B" using assms by (fastforce dest: finite_set_mset_mset_set) lemma image_mset_mset_set: \<^marker>\contributor \Lukas Bulwahn\\ assumes "inj_on f A" shows "image_mset f (mset_set A) = mset_set (f ` A)" proof cases assume "finite A" from this \inj_on f A\ show ?thesis by (induct A) auto next assume "infinite A" from this \inj_on f A\ have "infinite (f ` A)" using finite_imageD by blast from \infinite A\ \infinite (f ` A)\ show ?thesis by simp qed subsection \More properties of the replicate and repeat operations\ lemma in_replicate_mset[simp]: "x \# replicate_mset n y \ n > 0 \ x = y" unfolding replicate_mset_def by (induct n) auto lemma set_mset_replicate_mset_subset[simp]: "set_mset (replicate_mset n x) = (if n = 0 then {} else {x})" by (auto split: if_splits) lemma size_replicate_mset[simp]: "size (replicate_mset n M) = n" by (induct n, simp_all) lemma count_le_replicate_mset_subset_eq: "n \ count M x \ replicate_mset n x \# M" by (auto simp add: mset_subset_eqI) (metis count_replicate_mset subseteq_mset_def) lemma filter_eq_replicate_mset: "{#y \# D. y = x#} = replicate_mset (count D x) x" by (induct D) simp_all lemma replicate_count_mset_eq_filter_eq: "replicate (count (mset xs) k) k = filter (HOL.eq k) xs" by (induct xs) auto lemma replicate_mset_eq_empty_iff [simp]: "replicate_mset n a = {#} \ n = 0" by (induct n) simp_all lemma replicate_mset_eq_iff: "replicate_mset m a = replicate_mset n b \ m = 0 \ n = 0 \ m = n \ a = b" by (auto simp add: multiset_eq_iff) lemma repeat_mset_cancel1: "repeat_mset a A = repeat_mset a B \ A = B \ a = 0" by (auto simp: multiset_eq_iff) lemma repeat_mset_cancel2: "repeat_mset a A = repeat_mset b A \ a = b \ A = {#}" by (auto simp: multiset_eq_iff) lemma repeat_mset_eq_empty_iff: "repeat_mset n A = {#} \ n = 0 \ A = {#}" by (cases n) auto lemma image_replicate_mset [simp]: "image_mset f (replicate_mset n a) = replicate_mset n (f a)" by (induct n) simp_all lemma replicate_mset_msubseteq_iff: "replicate_mset m a \# replicate_mset n b \ m = 0 \ a = b \ m \ n" by (cases m) (auto simp: insert_subset_eq_iff simp flip: count_le_replicate_mset_subset_eq) lemma msubseteq_replicate_msetE: assumes "A \# replicate_mset n a" obtains m where "m \ n" and "A = replicate_mset m a" proof (cases "n = 0") case True with assms that show thesis by simp next case False from assms have "set_mset A \ set_mset (replicate_mset n a)" by (rule set_mset_mono) with False have "set_mset A \ {a}" by simp then have "\m. A = replicate_mset m a" proof (induction A) case empty then show ?case by simp next case (add b A) then obtain m where "A = replicate_mset m a" by auto with add.prems show ?case by (auto intro: exI [of _ "Suc m"]) qed then obtain m where A: "A = replicate_mset m a" .. with assms have "m \ n" by (auto simp add: replicate_mset_msubseteq_iff) then show thesis using A .. 
qed subsection \Big operators\ locale comm_monoid_mset = comm_monoid begin interpretation comp_fun_commute f by standard (simp add: fun_eq_iff left_commute) interpretation comp?: comp_fun_commute "f \ g" by (fact comp_comp_fun_commute) context begin definition F :: "'a multiset \ 'a" where eq_fold: "F M = fold_mset f \<^bold>1 M" lemma empty [simp]: "F {#} = \<^bold>1" by (simp add: eq_fold) lemma singleton [simp]: "F {#x#} = x" proof - interpret comp_fun_commute by standard (simp add: fun_eq_iff left_commute) show ?thesis by (simp add: eq_fold) qed lemma union [simp]: "F (M + N) = F M \<^bold>* F N" proof - interpret comp_fun_commute f by standard (simp add: fun_eq_iff left_commute) show ?thesis by (induct N) (simp_all add: left_commute eq_fold) qed lemma add_mset [simp]: "F (add_mset x N) = x \<^bold>* F N" unfolding add_mset_add_single[of x N] union by (simp add: ac_simps) lemma insert [simp]: shows "F (image_mset g (add_mset x A)) = g x \<^bold>* F (image_mset g A)" by (simp add: eq_fold) lemma remove: assumes "x \# A" shows "F A = x \<^bold>* F (A - {#x#})" using multi_member_split[OF assms] by auto lemma neutral: "\x\#A. x = \<^bold>1 \ F A = \<^bold>1" by (induct A) simp_all lemma neutral_const [simp]: "F (image_mset (\_. \<^bold>1) A) = \<^bold>1" by (simp add: neutral) private lemma F_image_mset_product: "F {#g x j \<^bold>* F {#g i j. i \# A#}. j \# B#} = F (image_mset (g x) B) \<^bold>* F {#F {#g i j. i \# A#}. j \# B#}" by (induction B) (simp_all add: left_commute semigroup.assoc semigroup_axioms) lemma swap: "F (image_mset (\i. F (image_mset (g i) B)) A) = F (image_mset (\j. F (image_mset (\i. g i j) A)) B)" apply (induction A, simp) apply (induction B, auto simp add: F_image_mset_product ac_simps) done lemma distrib: "F (image_mset (\x. g x \<^bold>* h x) A) = F (image_mset g A) \<^bold>* F (image_mset h A)" by (induction A) (auto simp: ac_simps) lemma union_disjoint: "A \# B = {#} \ F (image_mset g (A \# B)) = F (image_mset g A) \<^bold>* F (image_mset g B)" by (induction A) (auto simp: ac_simps) end end lemma comp_fun_commute_plus_mset[simp]: "comp_fun_commute ((+) :: 'a multiset \ _ \ _)" by standard (simp add: add_ac comp_def) declare comp_fun_commute.fold_mset_add_mset[OF comp_fun_commute_plus_mset, simp] lemma in_mset_fold_plus_iff[iff]: "x \# fold_mset (+) M NN \ x \# M \ (\N. N \# NN \ x \# N)" by (induct NN) auto context comm_monoid_add begin sublocale sum_mset: comm_monoid_mset plus 0 defines sum_mset = sum_mset.F .. lemma sum_unfold_sum_mset: "sum f A = sum_mset (image_mset f (mset_set A))" by (cases "finite A") (induct A rule: finite_induct, simp_all) end notation sum_mset ("\\<^sub>#") syntax (ASCII) "_sum_mset_image" :: "pttrn \ 'b set \ 'a \ 'a::comm_monoid_add" ("(3SUM _:#_. _)" [0, 51, 10] 10) syntax "_sum_mset_image" :: "pttrn \ 'b set \ 'a \ 'a::comm_monoid_add" ("(3\_\#_. _)" [0, 51, 10] 10) translations "\i \# A. b" \ "CONST sum_mset (CONST image_mset (\i. b) A)" context comm_monoid_add begin lemma sum_mset_sum_list: "sum_mset (mset xs) = sum_list xs" by (induction xs) auto end context canonically_ordered_monoid_add begin lemma sum_mset_0_iff [simp]: "sum_mset M = 0 \ (\x \ set_mset M. x = 0)" by (induction M) auto end context ordered_comm_monoid_add begin lemma sum_mset_mono: "sum_mset (image_mset f K) \ sum_mset (image_mset g K)" if "\i. 
i \# K \ f i \ g i" using that by (induction K) (simp_all add: add_mono) end context cancel_comm_monoid_add begin lemma sum_mset_diff: "sum_mset (M - N) = sum_mset M - sum_mset N" if "N \# M" for M N :: "'a multiset" using that by (auto simp add: subset_mset.le_iff_add) end context semiring_0 begin lemma sum_mset_distrib_left: "c * (\x \# M. f x) = (\x \# M. c * f(x))" by (induction M) (simp_all add: algebra_simps) lemma sum_mset_distrib_right: "(\x \# M. f x) * c = (\x \# M. f x * c)" by (induction M) (simp_all add: algebra_simps) end lemma sum_mset_product: fixes f :: "'a::{comm_monoid_add,times} \ 'b::semiring_0" shows "(\i \# A. f i) * (\i \# B. g i) = (\i\#A. \j\#B. f i * g j)" by (subst sum_mset.swap) (simp add: sum_mset_distrib_left sum_mset_distrib_right) context semiring_1 begin lemma sum_mset_replicate_mset [simp]: "sum_mset (replicate_mset n a) = of_nat n * a" by (induction n) (simp_all add: algebra_simps) lemma sum_mset_delta: "sum_mset (image_mset (\x. if x = y then c else 0) A) = c * of_nat (count A y)" by (induction A) (simp_all add: algebra_simps) lemma sum_mset_delta': "sum_mset (image_mset (\x. if y = x then c else 0) A) = c * of_nat (count A y)" by (induction A) (simp_all add: algebra_simps) end lemma of_nat_sum_mset [simp]: "of_nat (sum_mset A) = sum_mset (image_mset of_nat A)" by (induction A) auto lemma size_eq_sum_mset: "size M = (\a\#M. 1)" using image_mset_const_eq [of "1::nat" M] by simp lemma size_mset_set [simp]: "size (mset_set A) = card A" by (simp only: size_eq_sum_mset card_eq_sum sum_unfold_sum_mset) lemma sum_mset_constant [simp]: fixes y :: "'b::semiring_1" shows \(\x\#A. y) = of_nat (size A) * y\ by (induction A) (auto simp: algebra_simps) lemma set_mset_Union_mset[simp]: "set_mset (\\<^sub># MM) = (\M \ set_mset MM. set_mset M)" by (induct MM) auto lemma in_Union_mset_iff[iff]: "x \# \\<^sub># MM \ (\M. M \# MM \ x \# M)" by (induct MM) auto lemma count_sum: "count (sum f A) x = sum (\a. count (f a) x) A" by (induct A rule: infinite_finite_induct) simp_all lemma sum_eq_empty_iff: assumes "finite A" shows "sum f A = {#} \ (\a\A. f a = {#})" using assms by induct simp_all lemma Union_mset_empty_conv[simp]: "\\<^sub># M = {#} \ (\i\#M. i = {#})" by (induction M) auto lemma Union_image_single_mset[simp]: "\\<^sub># (image_mset (\x. {#x#}) m) = m" by(induction m) auto context comm_monoid_mult begin sublocale prod_mset: comm_monoid_mset times 1 defines prod_mset = prod_mset.F .. lemma prod_mset_empty: "prod_mset {#} = 1" by (fact prod_mset.empty) lemma prod_mset_singleton: "prod_mset {#x#} = x" by (fact prod_mset.singleton) lemma prod_mset_Un: "prod_mset (A + B) = prod_mset A * prod_mset B" by (fact prod_mset.union) lemma prod_mset_prod_list: "prod_mset (mset xs) = prod_list xs" by (induct xs) auto lemma prod_mset_replicate_mset [simp]: "prod_mset (replicate_mset n a) = a ^ n" by (induct n) simp_all lemma prod_unfold_prod_mset: "prod f A = prod_mset (image_mset f (mset_set A))" by (cases "finite A") (induct A rule: finite_induct, simp_all) lemma prod_mset_multiplicity: "prod_mset M = prod (\x. x ^ count M x) (set_mset M)" by (simp add: fold_mset_def prod.eq_fold prod_mset.eq_fold funpow_times_power comp_def) lemma prod_mset_delta: "prod_mset (image_mset (\x. if x = y then c else 1) A) = c ^ count A y" by (induction A) simp_all lemma prod_mset_delta': "prod_mset (image_mset (\x. 
if y = x then c else 1) A) = c ^ count A y" by (induction A) simp_all lemma prod_mset_subset_imp_dvd: assumes "A \# B" shows "prod_mset A dvd prod_mset B" proof - from assms have "B = (B - A) + A" by (simp add: subset_mset.diff_add) also have "prod_mset \ = prod_mset (B - A) * prod_mset A" by simp also have "prod_mset A dvd \" by simp finally show ?thesis . qed lemma dvd_prod_mset: assumes "x \# A" shows "x dvd prod_mset A" using assms prod_mset_subset_imp_dvd [of "{#x#}" A] by simp end notation prod_mset ("\\<^sub>#") syntax (ASCII) "_prod_mset_image" :: "pttrn \ 'b set \ 'a \ 'a::comm_monoid_mult" ("(3PROD _:#_. _)" [0, 51, 10] 10) syntax "_prod_mset_image" :: "pttrn \ 'b set \ 'a \ 'a::comm_monoid_mult" ("(3\_\#_. _)" [0, 51, 10] 10) translations "\i \# A. b" \ "CONST prod_mset (CONST image_mset (\i. b) A)" lemma prod_mset_constant [simp]: "(\_\#A. c) = c ^ size A" by (simp add: image_mset_const_eq) lemma (in semidom) prod_mset_zero_iff [iff]: "prod_mset A = 0 \ 0 \# A" by (induct A) auto lemma (in semidom_divide) prod_mset_diff: assumes "B \# A" and "0 \# B" shows "prod_mset (A - B) = prod_mset A div prod_mset B" proof - from assms obtain C where "A = B + C" by (metis subset_mset.add_diff_inverse) with assms show ?thesis by simp qed lemma (in semidom_divide) prod_mset_minus: assumes "a \# A" and "a \ 0" shows "prod_mset (A - {#a#}) = prod_mset A div a" using assms prod_mset_diff [of "{#a#}" A] by auto lemma (in normalization_semidom) normalize_prod_mset_normalize: "normalize (prod_mset (image_mset normalize A)) = normalize (prod_mset A)" proof (induction A) case (add x A) have "normalize (prod_mset (image_mset normalize (add_mset x A))) = normalize (x * normalize (prod_mset (image_mset normalize A)))" by simp also note add.IH finally show ?case by simp qed auto lemma (in algebraic_semidom) is_unit_prod_mset_iff: "is_unit (prod_mset A) \ (\x \# A. is_unit x)" by (induct A) (auto simp: is_unit_mult_iff) lemma (in normalization_semidom_multiplicative) normalize_prod_mset: "normalize (prod_mset A) = prod_mset (image_mset normalize A)" by (induct A) (simp_all add: normalize_mult) lemma (in normalization_semidom_multiplicative) normalized_prod_msetI: assumes "\a. a \# A \ normalize a = a" shows "normalize (prod_mset A) = prod_mset A" proof - from assms have "image_mset normalize A = A" by (induct A) simp_all then show ?thesis by (simp add: normalize_prod_mset) qed subsection \Multiset as order-ignorant lists\ context linorder begin lemma mset_insort [simp]: "mset (insort_key k x xs) = add_mset x (mset xs)" by (induct xs) simp_all lemma mset_sort [simp]: "mset (sort_key k xs) = mset xs" by (induct xs) simp_all text \ This lemma shows which properties suffice to show that a function \f\ with \f xs = ys\ behaves like sort. \ lemma properties_for_sort_key: assumes "mset ys = mset xs" and "\k. k \ set ys \ filter (\x. f k = f x) ys = filter (\x. f k = f x) xs" and "sorted (map f ys)" shows "sort_key f xs = ys" using assms proof (induct xs arbitrary: ys) case Nil then show ?case by simp next case (Cons x xs) from Cons.prems(2) have "\k \ set ys. filter (\x. f k = f x) (remove1 x ys) = filter (\x. 
f k = f x) xs" by (simp add: filter_remove1) with Cons.prems have "sort_key f xs = remove1 x ys" by (auto intro!: Cons.hyps simp add: sorted_map_remove1) moreover from Cons.prems have "x \# mset ys" by auto then have "x \ set ys" by simp ultimately show ?case using Cons.prems by (simp add: insort_key_remove1) qed lemma properties_for_sort: assumes multiset: "mset ys = mset xs" and "sorted ys" shows "sort xs = ys" proof (rule properties_for_sort_key) from multiset show "mset ys = mset xs" . from \sorted ys\ show "sorted (map (\x. x) ys)" by simp from multiset have "length (filter (\y. k = y) ys) = length (filter (\x. k = x) xs)" for k by (rule mset_eq_length_filter) then have "replicate (length (filter (\y. k = y) ys)) k = replicate (length (filter (\x. k = x) xs)) k" for k by simp then show "k \ set ys \ filter (\y. k = y) ys = filter (\x. k = x) xs" for k by (simp add: replicate_length_filter) qed lemma sort_key_inj_key_eq: assumes mset_equal: "mset xs = mset ys" and "inj_on f (set xs)" and "sorted (map f ys)" shows "sort_key f xs = ys" proof (rule properties_for_sort_key) from mset_equal show "mset ys = mset xs" by simp from \sorted (map f ys)\ show "sorted (map f ys)" . show "[x\ys . f k = f x] = [x\xs . f k = f x]" if "k \ set ys" for k proof - from mset_equal have set_equal: "set xs = set ys" by (rule mset_eq_setD) with that have "insert k (set ys) = set ys" by auto with \inj_on f (set xs)\ have inj: "inj_on f (insert k (set ys))" by (simp add: set_equal) from inj have "[x\ys . f k = f x] = filter (HOL.eq k) ys" by (auto intro!: inj_on_filter_key_eq) also have "\ = replicate (count (mset ys) k) k" by (simp add: replicate_count_mset_eq_filter_eq) also have "\ = replicate (count (mset xs) k) k" using mset_equal by simp also have "\ = filter (HOL.eq k) xs" by (simp add: replicate_count_mset_eq_filter_eq) also have "\ = [x\xs . f k = f x]" using inj by (auto intro!: inj_on_filter_key_eq [symmetric] simp add: set_equal) finally show ?thesis . qed qed lemma sort_key_eq_sort_key: assumes "mset xs = mset ys" and "inj_on f (set xs)" shows "sort_key f xs = sort_key f ys" by (rule sort_key_inj_key_eq) (simp_all add: assms) lemma sort_key_by_quicksort: "sort_key f xs = sort_key f [x\xs. f x < f (xs ! (length xs div 2))] @ [x\xs. f x = f (xs ! (length xs div 2))] @ sort_key f [x\xs. f x > f (xs ! (length xs div 2))]" (is "sort_key f ?lhs = ?rhs") proof (rule properties_for_sort_key) show "mset ?rhs = mset ?lhs" by (rule multiset_eqI) auto show "sorted (map f ?rhs)" by (auto simp add: sorted_append intro: sorted_map_same) next fix l assume "l \ set ?rhs" let ?pivot = "f (xs ! (length xs div 2))" have *: "\x. f l = f x \ f x = f l" by auto have "[x \ sort_key f xs . f x = f l] = [x \ xs. f x = f l]" unfolding filter_sort by (rule properties_for_sort_key) (auto intro: sorted_map_same) with * have **: "[x \ sort_key f xs . f l = f x] = [x \ xs. f l = f x]" by simp have "\x P. P (f x) ?pivot \ f l = f x \ P (f l) ?pivot \ f l = f x" by auto then have "\P. [x \ sort_key f xs . P (f x) ?pivot \ f l = f x] = [x \ sort_key f xs. P (f l) ?pivot \ f l = f x]" by simp note *** = this [of "(<)"] this [of "(>)"] this [of "(=)"] show "[x \ ?rhs. f l = f x] = [x \ ?lhs. 
f l = f x]" proof (cases "f l" ?pivot rule: linorder_cases) case less then have "f l \ ?pivot" and "\ f l > ?pivot" by auto with less show ?thesis by (simp add: filter_sort [symmetric] ** ***) next case equal then show ?thesis by (simp add: * less_le) next case greater then have "f l \ ?pivot" and "\ f l < ?pivot" by auto with greater show ?thesis by (simp add: filter_sort [symmetric] ** ***) qed qed lemma sort_by_quicksort: "sort xs = sort [x\xs. x < xs ! (length xs div 2)] @ [x\xs. x = xs ! (length xs div 2)] @ sort [x\xs. x > xs ! (length xs div 2)]" (is "sort ?lhs = ?rhs") using sort_key_by_quicksort [of "\x. x", symmetric] by simp text \A stable parameterized quicksort\ definition part :: "('b \ 'a) \ 'a \ 'b list \ 'b list \ 'b list \ 'b list" where "part f pivot xs = ([x \ xs. f x < pivot], [x \ xs. f x = pivot], [x \ xs. pivot < f x])" lemma part_code [code]: "part f pivot [] = ([], [], [])" "part f pivot (x # xs) = (let (lts, eqs, gts) = part f pivot xs; x' = f x in if x' < pivot then (x # lts, eqs, gts) else if x' > pivot then (lts, eqs, x # gts) else (lts, x # eqs, gts))" by (auto simp add: part_def Let_def split_def) lemma sort_key_by_quicksort_code [code]: "sort_key f xs = (case xs of [] \ [] | [x] \ xs | [x, y] \ (if f x \ f y then xs else [y, x]) | _ \ let (lts, eqs, gts) = part f (f (xs ! (length xs div 2))) xs in sort_key f lts @ eqs @ sort_key f gts)" proof (cases xs) case Nil then show ?thesis by simp next case (Cons _ ys) note hyps = Cons show ?thesis proof (cases ys) case Nil with hyps show ?thesis by simp next case (Cons _ zs) note hyps = hyps Cons show ?thesis proof (cases zs) case Nil with hyps show ?thesis by auto next case Cons from sort_key_by_quicksort [of f xs] have "sort_key f xs = (let (lts, eqs, gts) = part f (f (xs ! (length xs div 2))) xs in sort_key f lts @ eqs @ sort_key f gts)" by (simp only: split_def Let_def part_def fst_conv snd_conv) with hyps Cons show ?thesis by (simp only: list.cases) qed qed qed end hide_const (open) part lemma mset_remdups_subset_eq: "mset (remdups xs) \# mset xs" by (induct xs) (auto intro: subset_mset.order_trans) lemma mset_update: "i < length ls \ mset (ls[i := v]) = add_mset v (mset ls - {#ls ! i#})" proof (induct ls arbitrary: i) case Nil then show ?case by simp next case (Cons x xs) show ?case proof (cases i) case 0 then show ?thesis by simp next case (Suc i') with Cons show ?thesis by (cases \x = xs ! i'\) auto qed qed lemma mset_swap: "i < length ls \ j < length ls \ mset (ls[j := ls ! i, i := ls ! j]) = mset ls" by (cases "i = j") (simp_all add: mset_update nth_mem_mset) lemma mset_eq_finite: \finite {ys. mset ys = mset xs}\ proof - have \{ys. mset ys = mset xs} \ {ys. set ys \ set xs \ length ys \ length xs}\ by (auto simp add: dest: mset_eq_setD mset_eq_length) moreover have \finite {ys. set ys \ set xs \ length ys \ length xs}\ using finite_lists_length_le by blast ultimately show ?thesis by (rule finite_subset) qed subsection \The multiset order\ -subsubsection \Well-foundedness\ - definition mult1 :: "('a \ 'a) set \ ('a multiset \ 'a multiset) set" where "mult1 r = {(N, M). \a M0 K. M = add_mset a M0 \ N = M0 + K \ (\b. b \# K \ (b, a) \ r)}" definition mult :: "('a \ 'a) set \ ('a multiset \ 'a multiset) set" where "mult r = (mult1 r)\<^sup>+" +definition multp :: "('a \ 'a \ bool) \ 'a multiset \ 'a multiset \ bool" where + "multp r M N \ (M, N) \ mult {(x, y). r x y}" + lemma mult1I: assumes "M = add_mset a M0" and "N = M0 + K" and "\b. 
b \# K \ (b, a) \ r" shows "(N, M) \ mult1 r" using assms unfolding mult1_def by blast lemma mult1E: assumes "(N, M) \ mult1 r" obtains a M0 K where "M = add_mset a M0" "N = M0 + K" "\b. b \# K \ (b, a) \ r" using assms unfolding mult1_def by blast lemma mono_mult1: assumes "r \ r'" shows "mult1 r \ mult1 r'" -unfolding mult1_def using assms by blast + unfolding mult1_def using assms by blast lemma mono_mult: assumes "r \ r'" shows "mult r \ mult r'" -unfolding mult_def using mono_mult1[OF assms] trancl_mono by blast + unfolding mult_def using mono_mult1[OF assms] trancl_mono by blast + +lemma mono_multp[mono]: "r \ r' \ multp r \ multp r'" + unfolding le_fun_def le_bool_def +proof (intro allI impI) + fix M N :: "'a multiset" + assume "\x xa. r x xa \ r' x xa" + hence "{(x, y). r x y} \ {(x, y). r' x y}" + by blast + thus "multp r M N \ multp r' M N" + unfolding multp_def + by (fact mono_mult[THEN subsetD, rotated]) +qed lemma not_less_empty [iff]: "(M, {#}) \ mult1 r" -by (simp add: mult1_def) + by (simp add: mult1_def) + + +subsubsection \Well-foundedness\ lemma less_add: assumes mult1: "(N, add_mset a M0) \ mult1 r" shows "(\M. (M, M0) \ mult1 r \ N = add_mset a M) \ (\K. (\b. b \# K \ (b, a) \ r) \ N = M0 + K)" proof - let ?r = "\K a. \b. b \# K \ (b, a) \ r" let ?R = "\N M. \a M0 K. M = add_mset a M0 \ N = M0 + K \ ?r K a" obtain a' M0' K where M0: "add_mset a M0 = add_mset a' M0'" and N: "N = M0' + K" and r: "?r K a'" using mult1 unfolding mult1_def by auto show ?thesis (is "?case1 \ ?case2") proof - from M0 consider "M0 = M0'" "a = a'" | K' where "M0 = add_mset a' K'" "M0' = add_mset a K'" by atomize_elim (simp only: add_eq_conv_ex) then show ?thesis proof cases case 1 with N r have "?r K a \ N = M0 + K" by simp then have ?case2 .. then show ?thesis .. next case 2 from N 2(2) have n: "N = add_mset a (K' + K)" by simp with r 2(1) have "?R (K' + K) M0" by blast with n have ?case1 by (simp add: mult1_def) then show ?thesis .. qed qed qed lemma all_accessible: assumes "wf r" shows "\M. M \ Wellfounded.acc (mult1 r)" proof let ?R = "mult1 r" let ?W = "Wellfounded.acc ?R" { fix M M0 a assume M0: "M0 \ ?W" and wf_hyp: "\b. (b, a) \ r \ (\M \ ?W. add_mset b M \ ?W)" and acc_hyp: "\M. (M, M0) \ ?R \ add_mset a M \ ?W" have "add_mset a M0 \ ?W" proof (rule accI [of "add_mset a M0"]) fix N assume "(N, add_mset a M0) \ ?R" then consider M where "(M, M0) \ ?R" "N = add_mset a M" | K where "\b. b \# K \ (b, a) \ r" "N = M0 + K" by atomize_elim (rule less_add) then show "N \ ?W" proof cases case 1 from acc_hyp have "(M, M0) \ ?R \ add_mset a M \ ?W" .. from this and \(M, M0) \ ?R\ have "add_mset a M \ ?W" .. then show "N \ ?W" by (simp only: \N = add_mset a M\) next case 2 from this(1) have "M0 + K \ ?W" proof (induct K) case empty from M0 show "M0 + {#} \ ?W" by simp next case (add x K) from add.prems have "(x, a) \ r" by simp with wf_hyp have "\M \ ?W. add_mset x M \ ?W" by blast moreover from add have "M0 + K \ ?W" by simp ultimately have "add_mset x (M0 + K) \ ?W" .. then show "M0 + (add_mset x K) \ ?W" by simp qed then show "N \ ?W" by (simp only: 2(2)) qed qed } note tedious_reasoning = this show "M \ ?W" for M proof (induct M) show "{#} \ ?W" proof (rule accI) fix b assume "(b, {#}) \ ?R" with not_less_empty show "b \ ?W" by contradiction qed fix M a assume "M \ ?W" from \wf r\ have "\M \ ?W. add_mset a M \ ?W" proof induct fix a assume r: "\b. (b, a) \ r \ (\M \ ?W. add_mset b M \ ?W)" show "\M \ ?W. 
add_mset a M \ ?W" proof fix M assume "M \ ?W" then show "add_mset a M \ ?W" by (rule acc_induct) (rule tedious_reasoning [OF _ r]) qed qed from this and \M \ ?W\ show "add_mset a M \ ?W" .. qed qed -theorem wf_mult1: "wf r \ wf (mult1 r)" -by (rule acc_wfI) (rule all_accessible) - -theorem wf_mult: "wf r \ wf (mult r)" -unfolding mult_def by (rule wf_trancl) (rule wf_mult1) +lemma wf_mult1: "wf r \ wf (mult1 r)" + by (rule acc_wfI) (rule all_accessible) + +lemma wf_mult: "wf r \ wf (mult r)" + unfolding mult_def by (rule wf_trancl) (rule wf_mult1) + +lemma wfP_multp: "wfP r \ wfP (multp r)" + unfolding multp_def wfP_def + by (simp add: wf_mult) subsubsection \Closure-free presentation\ text \One direction.\ lemma mult_implies_one_step: assumes trans: "trans r" and MN: "(M, N) \ mult r" shows "\I J K. N = I + J \ M = I + K \ J \ {#} \ (\k \ set_mset K. \j \ set_mset J. (k, j) \ r)" using MN unfolding mult_def mult1_def proof (induction rule: converse_trancl_induct) case (base y) then show ?case by force next case (step y z) note yz = this(1) and zN = this(2) and N_decomp = this(3) obtain I J K where N: "N = I + J" "z = I + K" "J \ {#}" "\k\#K. \j\#J. (k, j) \ r" using N_decomp by blast obtain a M0 K' where z: "z = add_mset a M0" and y: "y = M0 + K'" and K: "\b. b \# K' \ (b, a) \ r" using yz by blast show ?case proof (cases "a \# K") case True moreover have "\j\#J. (k, j) \ r" if "k \# K'" for k using K N trans True by (meson that transE) ultimately show ?thesis by (rule_tac x = I in exI, rule_tac x = J in exI, rule_tac x = "(K - {#a#}) + K'" in exI) (use z y N in \auto simp del: subset_mset.add_diff_assoc2 dest: in_diffD\) next case False then have "a \# I" by (metis N(2) union_iff union_single_eq_member z) moreover have "M0 = I + K - {#a#}" using N(2) z by force ultimately show ?thesis by (rule_tac x = "I - {#a#}" in exI, rule_tac x = "add_mset a J" in exI, rule_tac x = "K + K'" in exI) (use z y N False K in \auto simp: add.assoc\) qed qed +lemmas multp_implies_one_step = + mult_implies_one_step[of "{(x, y). r x y}" for r, folded multp_def transp_trans, simplified] + lemma one_step_implies_mult: assumes "J \ {#}" and "\k \ set_mset K. \j \ set_mset J. (k, j) \ r" shows "(I + K, I + J) \ mult r" using assms proof (induction "size J" arbitrary: I J K) case 0 then show ?case by auto next case (Suc n) note IH = this(1) and size_J = this(2)[THEN sym] obtain J' a where J: "J = add_mset a J'" using size_J by (blast dest: size_eq_Suc_imp_eq_union) show ?case proof (cases "J' = {#}") case True then show ?thesis using J Suc by (fastforce simp add: mult_def mult1_def) next case [simp]: False have K: "K = {#x \# K. (x, a) \ r#} + {#x \# K. (x, a) \ r#}" by simp have "(I + K, (I + {# x \# K. (x, a) \ r #}) + J') \ mult r" using IH[of J' "{# x \# K. (x, a) \ r#}" "I + {# x \# K. (x, a) \ r#}"] J Suc.prems K size_J by (auto simp: ac_simps) moreover have "(I + {#x \# K. (x, a) \ r#} + J', I + J) \ mult r" by (fastforce simp: J mult1_def mult_def) ultimately show ?thesis unfolding mult_def by simp qed qed +lemmas one_step_implies_multp = + one_step_implies_mult[of _ _ "{(x, y). 
r x y}" for r, folded multp_def, simplified] + lemma subset_implies_mult: assumes sub: "A \# B" shows "(A, B) \ mult r" proof - have ApBmA: "A + (B - A) = B" using sub by simp have BmA: "B - A \ {#}" using sub by (simp add: Diff_eq_empty_iff_mset subset_mset.less_le_not_le) thus ?thesis by (rule one_step_implies_mult[of "B - A" "{#}" _ A, unfolded ApBmA, simplified]) qed - -subsection \The multiset extension is cancellative for multiset union\ +lemmas subset_implies_multp = subset_implies_mult[of _ _ "{(x, y). r x y}" for r, folded multp_def] + + +subsubsection \The multiset extension is cancellative for multiset union\ lemma mult_cancel: assumes "trans s" and "irrefl s" shows "(X + Z, Y + Z) \ mult s \ (X, Y) \ mult s" (is "?L \ ?R") proof assume ?L thus ?R proof (induct Z) case (add z Z) obtain X' Y' Z' where *: "add_mset z X + Z = Z' + X'" "add_mset z Y + Z = Z' + Y'" "Y' \ {#}" "\x \ set_mset X'. \y \ set_mset Y'. (x, y) \ s" using mult_implies_one_step[OF \trans s\ add(2)] by auto consider Z2 where "Z' = add_mset z Z2" | X2 Y2 where "X' = add_mset z X2" "Y' = add_mset z Y2" using *(1,2) by (metis add_mset_remove_trivial_If insert_iff set_mset_add_mset_insert union_iff) thus ?case proof (cases) case 1 thus ?thesis using * one_step_implies_mult[of Y' X' s Z2] by (auto simp: add.commute[of _ "{#_#}"] add.assoc intro: add(1)) next case 2 then obtain y where "y \ set_mset Y2" "(z, y) \ s" using *(4) \irrefl s\ by (auto simp: irrefl_def) moreover from this transD[OF \trans s\ _ this(2)] have "x' \ set_mset X2 \ \y \ set_mset Y2. (x', y) \ s" for x' using 2 *(4)[rule_format, of x'] by auto ultimately show ?thesis using * one_step_implies_mult[of Y2 X2 s Z'] 2 by (force simp: add.commute[of "{#_#}"] add.assoc[symmetric] intro: add(1)) qed qed auto next assume ?R then obtain I J K where "Y = I + J" "X = I + K" "J \ {#}" "\k \ set_mset K. \j \ set_mset J. (k, j) \ s" using mult_implies_one_step[OF \trans s\] by blast thus ?L using one_step_implies_mult[of J K s "I + Z"] by (auto simp: ac_simps) qed +lemmas multp_cancel = + mult_cancel[of "{(x, y). r x y}" for r, + folded multp_def transp_trans irreflp_irrefl_eq, simplified] + lemmas mult_cancel_add_mset = mult_cancel[of _ _ "{#_#}", unfolded union_mset_add_mset_right add.comm_neutral] -lemma mult_cancel_max: +lemmas multp_cancel_add_mset = + mult_cancel_add_mset[of "{(x, y). r x y}" for r, + folded multp_def transp_trans irreflp_irrefl_eq, simplified] + +lemma mult_cancel_max0: assumes "trans s" and "irrefl s" shows "(X, Y) \ mult s \ (X - X \# Y, Y - X \# Y) \ mult s" (is "?L \ ?R") proof - have "X - X \# Y + X \# Y = X" "Y - X \# Y + X \# Y = Y" by (auto simp flip: count_inject) thus ?thesis using mult_cancel[OF assms, of "X - X \# Y" "X \# Y" "Y - X \# Y"] by auto qed +lemmas mult_cancel_max = mult_cancel_max0[simplified] + +lemmas multp_cancel_max = + mult_cancel_max[of "{(x, y). r x y}" for r, + folded multp_def transp_trans irreflp_irrefl_eq, simplified] + + +subsubsection \Partial-order properties\ + +lemma mult1_lessE: + assumes "(N, M) \ mult1 {(a, b). r a b}" and "asymp r" + obtains a M0 K where "M = add_mset a M0" "N = M0 + K" + "a \# K" "\b. 
b \# K \ r b a" +proof - + from assms obtain a M0 K where "M = add_mset a M0" "N = M0 + K" and + *: "b \# K \ r b a" for b by (blast elim: mult1E) + moreover from * [of a] have "a \# K" + using \asymp r\ by (meson asymp.cases) + ultimately show thesis by (auto intro: that) +qed + +lemma trans_mult: "trans r \ trans (mult r)" + by (simp add: mult_def) + +lemma transp_multp: "transp r \ transp (multp r)" + unfolding multp_def transp_trans_eq + by (fact trans_mult[of "{(x, y). r x y}" for r, folded transp_trans]) + +lemma irrefl_mult: + assumes "trans r" "irrefl r" + shows "irrefl (mult r)" +proof (intro irreflI notI) + fix M + assume "(M, M) \ mult r" + then obtain I J K where "M = I + J" and "M = I + K" + and "J \ {#}" and "(\k\set_mset K. \j\set_mset J. (k, j) \ r)" + using mult_implies_one_step[OF \trans r\] by blast + then have *: "K \ {#}" and **: "\k\set_mset K. \j\set_mset K. (k, j) \ r" by auto + have "finite (set_mset K)" by simp + hence "set_mset K = {}" + using ** + proof (induction rule: finite_induct) + case empty + thus ?case by simp + next + case (insert x F) + have False + using \irrefl r\[unfolded irrefl_def, rule_format] + using \trans r\[THEN transD] + by (metis equals0D insert.IH insert.prems insertE insertI1 insertI2) + thus ?case .. + qed + with * show False by simp +qed + +lemmas irreflp_multp = + irrefl_mult[of "{(x, y). r x y}" for r, + folded transp_trans_eq irreflp_irrefl_eq, simplified, folded multp_def] + +instantiation multiset :: (preorder) order begin + +definition less_multiset :: "'a multiset \ 'a multiset \ bool" + where "M < N \ multp (<) M N" + +definition less_eq_multiset :: "'a multiset \ 'a multiset \ bool" + where "less_eq_multiset M N \ M < N \ M = N" + +instance +proof intro_classes + fix M N :: "'a multiset" + show "(M < N) = (M \ N \ \ N \ M)" + unfolding less_eq_multiset_def less_multiset_def + by (metis irreflp_def irreflp_less irreflp_multp transpE transp_less transp_multp) +next + fix M :: "'a multiset" + show "M \ M" + unfolding less_eq_multiset_def + by simp +next + fix M1 M2 M3 :: "'a multiset" + show "M1 \ M2 \ M2 \ M3 \ M1 \ M3" + unfolding less_eq_multiset_def less_multiset_def + using transp_multp[OF transp_less, THEN transpD] + by blast +next + fix M N :: "'a multiset" + show "M \ N \ N \ M \ M = N" + unfolding less_eq_multiset_def less_multiset_def + using transp_multp[OF transp_less, THEN transpD] + using irreflp_multp[OF transp_less irreflp_less, unfolded irreflp_def, rule_format] + by blast +qed + +end + +lemma mset_le_irrefl [elim!]: + fixes M :: "'a::preorder multiset" + shows "M < M \ R" + by simp + +lemma wfP_less_multiset[simp]: + assumes wfP_less: "wfP ((<) :: ('a :: preorder) \ 'a \ bool)" + shows "wfP ((<) :: 'a multiset \ 'a multiset \ bool)" + using wfP_multp[OF wfP_less] less_multiset_def + by (metis wfPUNIVI wfP_induct) + subsection \Quasi-executable version of the multiset extension\ text \ Predicate variants of \mult\ and the reflexive closure of \mult\, which are executable whenever the given predicate \P\ is. Together with the standard code equations for \(\#\) and \(-\) this should yield quadratic - (with respect to calls to \P\) implementations of \multp\ and \multeqp\. + (with respect to calls to \P\) implementations of \multp_code\ and \multeqp_code\. 
\ -definition multp :: "('a \ 'a \ bool) \ 'a multiset \ 'a multiset \ bool" where - "multp P N M = +definition multp_code :: "('a \ 'a \ bool) \ 'a multiset \ 'a multiset \ bool" where + "multp_code P N M = (let Z = M \# N; X = M - Z in X \ {#} \ (let Y = N - Z in (\y \ set_mset Y. \x \ set_mset X. P y x)))" -definition multeqp :: "('a \ 'a \ bool) \ 'a multiset \ 'a multiset \ bool" where - "multeqp P N M = +definition multeqp_code :: "('a \ 'a \ bool) \ 'a multiset \ 'a multiset \ bool" where + "multeqp_code P N M = (let Z = M \# N; X = M - Z; Y = N - Z in (\y \ set_mset Y. \x \ set_mset X. P y x))" -lemma multp_iff: +lemma multp_code_iff_mult: assumes "irrefl R" and "trans R" and [simp]: "\x y. P x y \ (x, y) \ R" - shows "multp P N M \ (N, M) \ mult R" (is "?L \ ?R") + shows "multp_code P N M \ (N, M) \ mult R" (is "?L \ ?R") proof - have *: "M \# N + (N - M \# N) = N" "M \# N + (M - M \# N) = M" "(M - M \# N) \# (N - M \# N) = {#}" by (auto simp flip: count_inject) show ?thesis proof assume ?L thus ?R using one_step_implies_mult[of "M - M \# N" "N - M \# N" R "M \# N"] * - by (auto simp: multp_def Let_def) + by (auto simp: multp_code_def Let_def) next { fix I J K :: "'a multiset" assume "(I + J) \# (I + K) = {#}" then have "I = {#}" by (metis inter_union_distrib_right union_eq_empty) } note [dest!] = this assume ?R thus ?L using mult_implies_one_step[OF assms(2), of "N - M \# N" "M - M \# N"] - mult_cancel_max[OF assms(2,1), of "N" "M"] * by (auto simp: multp_def) + mult_cancel_max[OF assms(2,1), of "N" "M"] * by (auto simp: multp_code_def) qed qed -lemma multeqp_iff: +lemma multp_code_eq_multp: "irreflp r \ transp r \ multp_code r = multp r" + using multp_code_iff_mult[of "{(x, y). r x y}" r for r, + folded irreflp_irrefl_eq transp_trans multp_def, simplified] + by blast + +lemma multeqp_code_iff_reflcl_mult: assumes "irrefl R" and "trans R" and "\x y. P x y \ (x, y) \ R" - shows "multeqp P N M \ (N, M) \ (mult R)\<^sup>=" + shows "multeqp_code P N M \ (N, M) \ (mult R)\<^sup>=" proof - { assume "N \ M" "M - M \# N = {#}" then obtain y where "count N y \ count M y" by (auto simp flip: count_inject) then have "\y. count M y < count N y" using \M - M \# N = {#}\ by (auto simp flip: count_inject dest!: le_neq_implies_less fun_cong[of _ _ y]) } - then have "multeqp P N M \ multp P N M \ N = M" - by (auto simp: multeqp_def multp_def Let_def in_diff_count) - thus ?thesis using multp_iff[OF assms] by simp -qed - - -subsubsection \Partial-order properties\ - -lemma (in preorder) mult1_lessE: - assumes "(N, M) \ mult1 {(a, b). a < b}" - obtains a M0 K where "M = add_mset a M0" "N = M0 + K" - "a \# K" "\b. b \# K \ b < a" -proof - - from assms obtain a M0 K where "M = add_mset a M0" "N = M0 + K" and - *: "b \# K \ b < a" for b by (blast elim: mult1E) - moreover from * [of a] have "a \# K" by auto - ultimately show thesis by (auto intro: that) + then have "multeqp_code P N M \ multp_code P N M \ N = M" + by (auto simp: multeqp_code_def multp_code_def Let_def in_diff_count) + thus ?thesis using multp_code_iff_mult[OF assms] by simp qed -instantiation multiset :: (preorder) order -begin - -definition less_multiset :: "'a multiset \ 'a multiset \ bool" - where "M' < M \ (M', M) \ mult {(x', x). x' < x}" - -definition less_eq_multiset :: "'a multiset \ 'a multiset \ bool" - where "less_eq_multiset M' M \ M' < M \ M' = M" - -instance -proof - - have irrefl: "\ M < M" for M :: "'a multiset" - proof - assume "M < M" - then have MM: "(M, M) \ mult {(x, y). 
x < y}" by (simp add: less_multiset_def) - have "trans {(x'::'a, x). x' < x}" - by (metis (mono_tags, lifting) case_prodD case_prodI less_trans mem_Collect_eq transI) - moreover note MM - ultimately have "\I J K. M = I + J \ M = I + K - \ J \ {#} \ (\k\set_mset K. \j\set_mset J. (k, j) \ {(x, y). x < y})" - by (rule mult_implies_one_step) - then obtain I J K where "M = I + J" and "M = I + K" - and "J \ {#}" and "(\k\set_mset K. \j\set_mset J. (k, j) \ {(x, y). x < y})" by blast - then have *: "K \ {#}" and **: "\k\set_mset K. \j\set_mset K. k < j" by auto - have "finite (set_mset K)" by simp - moreover note ** - ultimately have "set_mset K = {}" - by (induct rule: finite_induct) (auto intro: order_less_trans) - with * show False by simp - qed - have trans: "K < M \ M < N \ K < N" for K M N :: "'a multiset" - unfolding less_multiset_def mult_def by (blast intro: trancl_trans) - show "OFCLASS('a multiset, order_class)" - by standard (auto simp add: less_eq_multiset_def irrefl dest: trans) -qed - -end - -lemma mset_le_irrefl [elim!]: - fixes M :: "'a::preorder multiset" - shows "M < M \ R" - by simp +lemma multeqp_code_eq_reflclp_multp: "irreflp r \ transp r \ multeqp_code r = (multp r)\<^sup>=\<^sup>=" + using multeqp_code_iff_reflcl_mult[of "{(x, y). r x y}" r for r, + folded irreflp_irrefl_eq transp_trans, simplified, folded multp_def] + by blast subsubsection \Monotonicity of multiset union\ lemma mult1_union: "(B, D) \ mult1 r \ (C + B, C + D) \ mult1 r" by (force simp: mult1_def) lemma union_le_mono2: "B < D \ C + B < C + (D::'a::preorder multiset)" -apply (unfold less_multiset_def mult_def) +apply (unfold less_multiset_def multp_def mult_def) apply (erule trancl_induct) apply (blast intro: mult1_union) apply (blast intro: mult1_union trancl_trans) done lemma union_le_mono1: "B < D \ B + C < D + (C::'a::preorder multiset)" apply (subst add.commute [of B C]) apply (subst add.commute [of D C]) apply (erule union_le_mono2) done lemma union_less_mono: fixes A B C D :: "'a::preorder multiset" shows "A < C \ B < D \ A + B < C + D" by (blast intro!: union_le_mono1 union_le_mono2 less_trans) instantiation multiset :: (preorder) ordered_ab_semigroup_add begin instance by standard (auto simp add: less_eq_multiset_def intro: union_le_mono2) end subsubsection \Termination proofs with multiset orders\ lemma multi_member_skip: "x \# XS \ x \# {# y #} + XS" and multi_member_this: "x \# {# x #} + XS" and multi_member_last: "x \# {# x #}" by auto definition "ms_strict = mult pair_less" definition "ms_weak = ms_strict \ Id" lemma ms_reduction_pair: "reduction_pair (ms_strict, ms_weak)" unfolding reduction_pair_def ms_strict_def ms_weak_def pair_less_def by (auto intro: wf_mult1 wf_trancl simp: mult_def) lemma smsI: "(set_mset A, set_mset B) \ max_strict \ (Z + A, Z + B) \ ms_strict" unfolding ms_strict_def by (rule one_step_implies_mult) (auto simp add: max_strict_def pair_less_def elim!:max_ext.cases) lemma wmsI: "(set_mset A, set_mset B) \ max_strict \ A = {#} \ B = {#} \ (Z + A, Z + B) \ ms_weak" unfolding ms_weak_def ms_strict_def by (auto simp add: pair_less_def max_strict_def elim!:max_ext.cases intro: one_step_implies_mult) inductive pw_leq where pw_leq_empty: "pw_leq {#} {#}" | pw_leq_step: "\(x,y) \ pair_leq; pw_leq X Y \ \ pw_leq ({#x#} + X) ({#y#} + Y)" lemma pw_leq_lstep: "(x, y) \ pair_leq \ pw_leq {#x#} {#y#}" by (drule pw_leq_step) (rule pw_leq_empty, simp) lemma pw_leq_split: assumes "pw_leq X Y" shows "\A B Z. 
X = A + Z \ Y = B + Z \ ((set_mset A, set_mset B) \ max_strict \ (B = {#} \ A = {#}))" using assms proof induct case pw_leq_empty thus ?case by auto next case (pw_leq_step x y X Y) then obtain A B Z where [simp]: "X = A + Z" "Y = B + Z" and 1[simp]: "(set_mset A, set_mset B) \ max_strict \ (B = {#} \ A = {#})" by auto from pw_leq_step consider "x = y" | "(x, y) \ pair_less" unfolding pair_leq_def by auto thus ?case proof cases case [simp]: 1 have "{#x#} + X = A + ({#y#}+Z) \ {#y#} + Y = B + ({#y#}+Z) \ ((set_mset A, set_mset B) \ max_strict \ (B = {#} \ A = {#}))" by auto thus ?thesis by blast next case 2 let ?A' = "{#x#} + A" and ?B' = "{#y#} + B" have "{#x#} + X = ?A' + Z" "{#y#} + Y = ?B' + Z" by auto moreover have "(set_mset ?A', set_mset ?B') \ max_strict" using 1 2 unfolding max_strict_def by (auto elim!: max_ext.cases) ultimately show ?thesis by blast qed qed lemma assumes pwleq: "pw_leq Z Z'" shows ms_strictI: "(set_mset A, set_mset B) \ max_strict \ (Z + A, Z' + B) \ ms_strict" and ms_weakI1: "(set_mset A, set_mset B) \ max_strict \ (Z + A, Z' + B) \ ms_weak" and ms_weakI2: "(Z + {#}, Z' + {#}) \ ms_weak" proof - from pw_leq_split[OF pwleq] obtain A' B' Z'' where [simp]: "Z = A' + Z''" "Z' = B' + Z''" and mx_or_empty: "(set_mset A', set_mset B') \ max_strict \ (A' = {#} \ B' = {#})" by blast { assume max: "(set_mset A, set_mset B) \ max_strict" from mx_or_empty have "(Z'' + (A + A'), Z'' + (B + B')) \ ms_strict" proof assume max': "(set_mset A', set_mset B') \ max_strict" with max have "(set_mset (A + A'), set_mset (B + B')) \ max_strict" by (auto simp: max_strict_def intro: max_ext_additive) thus ?thesis by (rule smsI) next assume [simp]: "A' = {#} \ B' = {#}" show ?thesis by (rule smsI) (auto intro: max) qed thus "(Z + A, Z' + B) \ ms_strict" by (simp add: ac_simps) thus "(Z + A, Z' + B) \ ms_weak" by (simp add: ms_weak_def) } from mx_or_empty have "(Z'' + A', Z'' + B') \ ms_weak" by (rule wmsI) thus "(Z + {#}, Z' + {#}) \ ms_weak" by (simp add: ac_simps) qed lemma empty_neutral: "{#} + x = x" "x + {#} = x" and nonempty_plus: "{# x #} + rs \ {#}" and nonempty_single: "{# x #} \ {#}" by auto setup \ let fun msetT T = \<^Type>\multiset T\; fun mk_mset T [] = \<^instantiate>\'a = T in term \{#}\\ | mk_mset T [x] = \<^instantiate>\'a = T and x in term \{#x#}\\ | mk_mset T (x :: xs) = \<^Const>\plus \msetT T\ for \mk_mset T [x]\ \mk_mset T xs\\ fun mset_member_tac ctxt m i = if m <= 0 then resolve_tac ctxt @{thms multi_member_this} i ORELSE resolve_tac ctxt @{thms multi_member_last} i else resolve_tac ctxt @{thms multi_member_skip} i THEN mset_member_tac ctxt (m - 1) i fun mset_nonempty_tac ctxt = resolve_tac ctxt @{thms nonempty_plus} ORELSE' resolve_tac ctxt @{thms nonempty_single} fun regroup_munion_conv ctxt = Function_Lib.regroup_conv ctxt \<^const_abbrev>\empty_mset\ \<^const_name>\plus\ (map (fn t => t RS eq_reflection) (@{thms ac_simps} @ @{thms empty_neutral})) fun unfold_pwleq_tac ctxt i = (resolve_tac ctxt @{thms pw_leq_step} i THEN (fn st => unfold_pwleq_tac ctxt (i + 1) st)) ORELSE (resolve_tac ctxt @{thms pw_leq_lstep} i) ORELSE (resolve_tac ctxt @{thms pw_leq_empty} i) val set_mset_simps = [@{thm set_mset_empty}, @{thm set_mset_single}, @{thm set_mset_union}, @{thm Un_insert_left}, @{thm Un_empty_left}] in ScnpReconstruct.multiset_setup (ScnpReconstruct.Multiset { msetT=msetT, mk_mset=mk_mset, mset_regroup_conv=regroup_munion_conv, mset_member_tac=mset_member_tac, mset_nonempty_tac=mset_nonempty_tac, mset_pwleq_tac=unfold_pwleq_tac, set_of_simps=set_mset_simps, smsI'= 
@{thm ms_strictI}, wmsI2''= @{thm ms_weakI2}, wmsI1= @{thm ms_weakI1}, reduction_pair = @{thm ms_reduction_pair} }) end \ subsection \Legacy theorem bindings\ lemmas multi_count_eq = multiset_eq_iff [symmetric] lemma union_commute: "M + N = N + (M::'a multiset)" by (fact add.commute) lemma union_assoc: "(M + N) + K = M + (N + (K::'a multiset))" by (fact add.assoc) lemma union_lcomm: "M + (N + K) = N + (M + (K::'a multiset))" by (fact add.left_commute) lemmas union_ac = union_assoc union_commute union_lcomm add_mset_commute lemma union_right_cancel: "M + K = N + K \ M = (N::'a multiset)" by (fact add_right_cancel) lemma union_left_cancel: "K + M = K + N \ M = (N::'a multiset)" by (fact add_left_cancel) lemma multi_union_self_other_eq: "(A::'a multiset) + X = A + Y \ X = Y" by (fact add_left_imp_eq) lemma mset_subset_trans: "(M::'a multiset) \# K \ K \# N \ M \# N" by (fact subset_mset.less_trans) lemma multiset_inter_commute: "A \# B = B \# A" by (fact subset_mset.inf.commute) lemma multiset_inter_assoc: "A \# (B \# C) = A \# B \# C" by (fact subset_mset.inf.assoc [symmetric]) lemma multiset_inter_left_commute: "A \# (B \# C) = B \# (A \# C)" by (fact subset_mset.inf.left_commute) lemmas multiset_inter_ac = multiset_inter_commute multiset_inter_assoc multiset_inter_left_commute lemma mset_le_not_refl: "\ M < (M::'a::preorder multiset)" by (fact less_irrefl) lemma mset_le_trans: "K < M \ M < N \ K < (N::'a::preorder multiset)" by (fact less_trans) lemma mset_le_not_sym: "M < N \ \ N < (M::'a::preorder multiset)" by (fact less_not_sym) lemma mset_le_asym: "M < N \ (\ P \ N < (M::'a::preorder multiset)) \ P" by (fact less_asym) declaration \ let fun multiset_postproc _ maybe_name all_values (T as Type (_, [elem_T])) (Const _ $ t') = let val (maybe_opt, ps) = Nitpick_Model.dest_plain_fun t' ||> (~~) ||> map (apsnd (snd o HOLogic.dest_number)) fun elems_for t = (case AList.lookup (=) ps t of SOME n => replicate n t | NONE => [Const (maybe_name, elem_T --> elem_T) $ t]) in (case maps elems_for (all_values elem_T) @ (if maybe_opt then [Const (Nitpick_Model.unrep_mixfix (), elem_T)] else []) of [] => \<^Const>\Groups.zero T\ | ts => foldl1 (fn (s, t) => \<^Const>\add_mset elem_T for s t\) ts) end | multiset_postproc _ _ _ _ t = t in Nitpick_Model.register_term_postprocessor \<^typ>\'a multiset\ multiset_postproc end \ subsection \Naive implementation using lists\ code_datatype mset lemma [code]: "{#} = mset []" by simp lemma [code]: "add_mset x (mset xs) = mset (x # xs)" by simp lemma [code]: "Multiset.is_empty (mset xs) \ List.null xs" by (simp add: Multiset.is_empty_def List.null_def) lemma union_code [code]: "mset xs + mset ys = mset (xs @ ys)" by simp lemma [code]: "image_mset f (mset xs) = mset (map f xs)" by simp lemma [code]: "filter_mset f (mset xs) = mset (filter f xs)" by simp lemma [code]: "mset xs - mset ys = mset (fold remove1 ys xs)" by (rule sym, induct ys arbitrary: xs) (simp_all add: diff_add diff_right_commute diff_diff_add) lemma [code]: "mset xs \# mset ys = mset (snd (fold (\x (ys, zs). if x \ set ys then (remove1 x ys, x # zs) else (ys, zs)) xs (ys, [])))" proof - have "\zs. mset (snd (fold (\x (ys, zs). if x \ set ys then (remove1 x ys, x # zs) else (ys, zs)) xs (ys, zs))) = (mset xs \# mset ys) + mset zs" by (induct xs arbitrary: ys) (auto simp add: inter_add_right1 inter_add_right2 ac_simps) then show ?thesis by simp qed lemma [code]: "mset xs \# mset ys = mset (case_prod append (fold (\x (ys, zs). (remove1 x ys, x # zs)) xs (ys, [])))" proof - have "\zs. 
mset (case_prod append (fold (\x (ys, zs). (remove1 x ys, x # zs)) xs (ys, zs))) = (mset xs \# mset ys) + mset zs" by (induct xs arbitrary: ys) (simp_all add: multiset_eq_iff) then show ?thesis by simp qed declare in_multiset_in_set [code_unfold] lemma [code]: "count (mset xs) x = fold (\y. if x = y then Suc else id) xs 0" proof - have "\n. fold (\y. if x = y then Suc else id) xs n = count (mset xs) x + n" by (induct xs) simp_all then show ?thesis by simp qed declare set_mset_mset [code] declare sorted_list_of_multiset_mset [code] lemma [code]: \ \not very efficient, but representation-ignorant!\ "mset_set A = mset (sorted_list_of_set A)" apply (cases "finite A") apply simp_all apply (induct A rule: finite_induct) apply simp_all done declare size_mset [code] fun subset_eq_mset_impl :: "'a list \ 'a list \ bool option" where "subset_eq_mset_impl [] ys = Some (ys \ [])" | "subset_eq_mset_impl (Cons x xs) ys = (case List.extract ((=) x) ys of None \ None | Some (ys1,_,ys2) \ subset_eq_mset_impl xs (ys1 @ ys2))" lemma subset_eq_mset_impl: "(subset_eq_mset_impl xs ys = None \ \ mset xs \# mset ys) \ (subset_eq_mset_impl xs ys = Some True \ mset xs \# mset ys) \ (subset_eq_mset_impl xs ys = Some False \ mset xs = mset ys)" proof (induct xs arbitrary: ys) case (Nil ys) show ?case by (auto simp: subset_mset.zero_less_iff_neq_zero) next case (Cons x xs ys) show ?case proof (cases "List.extract ((=) x) ys") case None hence x: "x \ set ys" by (simp add: extract_None_iff) { assume "mset (x # xs) \# mset ys" from set_mset_mono[OF this] x have False by simp } note nle = this moreover { assume "mset (x # xs) \# mset ys" hence "mset (x # xs) \# mset ys" by auto from nle[OF this] have False . } ultimately show ?thesis using None by auto next case (Some res) obtain ys1 y ys2 where res: "res = (ys1,y,ys2)" by (cases res, auto) note Some = Some[unfolded res] from extract_SomeE[OF Some] have "ys = ys1 @ x # ys2" by simp hence id: "mset ys = add_mset x (mset (ys1 @ ys2))" by auto show ?thesis unfolding subset_eq_mset_impl.simps unfolding Some option.simps split unfolding id using Cons[of "ys1 @ ys2"] unfolding subset_mset_def subseteq_mset_def by auto qed qed lemma [code]: "mset xs \# mset ys \ subset_eq_mset_impl xs ys \ None" using subset_eq_mset_impl[of xs ys] by (cases "subset_eq_mset_impl xs ys", auto) lemma [code]: "mset xs \# mset ys \ subset_eq_mset_impl xs ys = Some True" using subset_eq_mset_impl[of xs ys] by (cases "subset_eq_mset_impl xs ys", auto) instantiation multiset :: (equal) equal begin definition [code del]: "HOL.equal A (B :: 'a multiset) \ A = B" lemma [code]: "HOL.equal (mset xs) (mset ys) \ subset_eq_mset_impl xs ys = Some False" unfolding equal_multiset_def using subset_eq_mset_impl[of xs ys] by (cases "subset_eq_mset_impl xs ys", auto) instance by standard (simp add: equal_multiset_def) end declare sum_mset_sum_list [code] lemma [code]: "prod_mset (mset xs) = fold times xs 1" proof - have "\x. fold times xs x = prod_mset (mset xs) * x" by (induct xs) (simp_all add: ac_simps) then show ?thesis by simp qed text \ Exercise for the casual reader: add implementations for \<^term>\(\)\ and \<^term>\(<)\ (multiset order). 
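  An added hint, not part of the original text: for \'a::preorder\ elements the
  previous subsection already does most of the work, since \M < N\ was defined as
  \multp (<) M N\, and \multp_code_eq_multp\ (discharged with \irreflp_less\ and
  \transp_less\) turns this into the executable \multp_code (<) M N\; the
  reflexive case \(\)\ then follows analogously via \multeqp_code\ and
  \multeqp_code_eq_reflclp_multp\.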
\ text \Quickcheck generators\ context includes term_syntax begin definition msetify :: "'a::typerep list \ (unit \ Code_Evaluation.term) \ 'a multiset \ (unit \ Code_Evaluation.term)" where [code_unfold]: "msetify xs = Code_Evaluation.valtermify mset {\} xs" end instantiation multiset :: (random) random begin context includes state_combinator_syntax begin definition "Quickcheck_Random.random i = Quickcheck_Random.random i \\ (\xs. Pair (msetify xs))" instance .. end end instantiation multiset :: (full_exhaustive) full_exhaustive begin definition full_exhaustive_multiset :: "('a multiset \ (unit \ term) \ (bool \ term list) option) \ natural \ (bool \ term list) option" where "full_exhaustive_multiset f i = Quickcheck_Exhaustive.full_exhaustive (\xs. f (msetify xs)) i" instance .. end hide_const (open) msetify subsection \BNF setup\ definition rel_mset where "rel_mset R X Y \ (\xs ys. mset xs = X \ mset ys = Y \ list_all2 R xs ys)" lemma mset_zip_take_Cons_drop_twice: assumes "length xs = length ys" "j \ length xs" shows "mset (zip (take j xs @ x # drop j xs) (take j ys @ y # drop j ys)) = add_mset (x,y) (mset (zip xs ys))" using assms proof (induct xs ys arbitrary: x y j rule: list_induct2) case Nil thus ?case by simp next case (Cons x xs y ys) thus ?case proof (cases "j = 0") case True thus ?thesis by simp next case False then obtain k where k: "j = Suc k" by (cases j) simp hence "k \ length xs" using Cons.prems by auto hence "mset (zip (take k xs @ x # drop k xs) (take k ys @ y # drop k ys)) = add_mset (x,y) (mset (zip xs ys))" by (rule Cons.hyps(2)) thus ?thesis unfolding k by auto qed qed lemma ex_mset_zip_left: assumes "length xs = length ys" "mset xs' = mset xs" shows "\ys'. length ys' = length xs' \ mset (zip xs' ys') = mset (zip xs ys)" using assms proof (induct xs ys arbitrary: xs' rule: list_induct2) case Nil thus ?case by auto next case (Cons x xs y ys xs') obtain j where j_len: "j < length xs'" and nth_j: "xs' ! j = x" by (metis Cons.prems in_set_conv_nth list.set_intros(1) mset_eq_setD) define xsa where "xsa = take j xs' @ drop (Suc j) xs'" have "mset xs' = {#x#} + mset xsa" unfolding xsa_def using j_len nth_j by (metis Cons_nth_drop_Suc union_mset_add_mset_right add_mset_remove_trivial add_diff_cancel_left' append_take_drop_id mset.simps(2) mset_append) hence ms_x: "mset xsa = mset xs" by (simp add: Cons.prems) then obtain ysa where len_a: "length ysa = length xsa" and ms_a: "mset (zip xsa ysa) = mset (zip xs ys)" using Cons.hyps(2) by blast define ys' where "ys' = take j ysa @ y # drop j ysa" have xs': "xs' = take j xsa @ x # drop j xsa" using ms_x j_len nth_j Cons.prems xsa_def by (metis append_eq_append_conv append_take_drop_id diff_Suc_Suc Cons_nth_drop_Suc length_Cons length_drop size_mset) have j_len': "j \ length xsa" using j_len xs' xsa_def by (metis add_Suc_right append_take_drop_id length_Cons length_append less_eq_Suc_le not_less) have "length ys' = length xs'" unfolding ys'_def using Cons.prems len_a ms_x by (metis add_Suc_right append_take_drop_id length_Cons length_append mset_eq_length) moreover have "mset (zip xs' ys') = mset (zip (x # xs) (y # ys))" unfolding xs' ys'_def by (rule trans[OF mset_zip_take_Cons_drop_twice]) (auto simp: len_a ms_a j_len') ultimately show ?case by blast qed lemma list_all2_reorder_left_invariance: assumes rel: "list_all2 R xs ys" and ms_x: "mset xs' = mset xs" shows "\ys'. 
list_all2 R xs' ys' \ mset ys' = mset ys" proof - have len: "length xs = length ys" using rel list_all2_conv_all_nth by auto obtain ys' where len': "length xs' = length ys'" and ms_xy: "mset (zip xs' ys') = mset (zip xs ys)" using len ms_x by (metis ex_mset_zip_left) have "list_all2 R xs' ys'" using assms(1) len' ms_xy unfolding list_all2_iff by (blast dest: mset_eq_setD) moreover have "mset ys' = mset ys" using len len' ms_xy map_snd_zip mset_map by metis ultimately show ?thesis by blast qed lemma ex_mset: "\xs. mset xs = X" by (induct X) (simp, metis mset.simps(2)) inductive pred_mset :: "('a \ bool) \ 'a multiset \ bool" where "pred_mset P {#}" | "\P a; pred_mset P M\ \ pred_mset P (add_mset a M)" lemma pred_mset_iff: \ \TODO: alias for \<^const>\Multiset.Ball\\ \pred_mset P M \ Multiset.Ball M P\ (is \?P \ ?Q\) proof assume ?P then show ?Q by induction simp_all next assume ?Q then show ?P by (induction M) (auto intro: pred_mset.intros) qed bnf "'a multiset" map: image_mset sets: set_mset bd: natLeq wits: "{#}" rel: rel_mset pred: pred_mset proof - show "image_mset id = id" by (rule image_mset.id) show "image_mset (g \ f) = image_mset g \ image_mset f" for f g unfolding comp_def by (rule ext) (simp add: comp_def image_mset.compositionality) show "(\z. z \ set_mset X \ f z = g z) \ image_mset f X = image_mset g X" for f g X by (induct X) simp_all show "set_mset \ image_mset f = (`) f \ set_mset" for f by auto show "card_order natLeq" by (rule natLeq_card_order) show "BNF_Cardinal_Arithmetic.cinfinite natLeq" by (rule natLeq_cinfinite) show "ordLeq3 (card_of (set_mset X)) natLeq" for X by transfer (auto intro!: ordLess_imp_ordLeq simp: finite_iff_ordLess_natLeq[symmetric]) show "rel_mset R OO rel_mset S \ rel_mset (R OO S)" for R S unfolding rel_mset_def[abs_def] OO_def apply clarify subgoal for X Z Y xs ys' ys zs apply (drule list_all2_reorder_left_invariance [where xs = ys' and ys = zs and xs' = ys]) apply (auto intro: list_all2_trans) done done show "rel_mset R = (\x y. \z. set_mset z \ {(x, y). R x y} \ image_mset fst z = x \ image_mset snd z = y)" for R unfolding rel_mset_def[abs_def] apply (rule ext)+ apply safe apply (rule_tac x = "mset (zip xs ys)" in exI; auto simp: in_set_zip list_all2_iff simp flip: mset_map) apply (rename_tac XY) apply (cut_tac X = XY in ex_mset) apply (erule exE) apply (rename_tac xys) apply (rule_tac x = "map fst xys" in exI) apply (auto simp: mset_map) apply (rule_tac x = "map snd xys" in exI) apply (auto simp: mset_map list_all2I subset_eq zip_map_fst_snd) done show "z \ set_mset {#} \ False" for z by auto show "pred_mset P = (\x. Ball (set_mset x) P)" for P by (simp add: fun_eq_iff pred_mset_iff) qed inductive rel_mset' :: \('a \ 'b \ bool) \ 'a multiset \ 'b multiset \ bool\ where Zero[intro]: "rel_mset' R {#} {#}" | Plus[intro]: "\R a b; rel_mset' R M N\ \ rel_mset' R (add_mset a M) (add_mset b N)" lemma rel_mset_Zero: "rel_mset R {#} {#}" unfolding rel_mset_def Grp_def by auto declare multiset.count[simp] declare count_Abs_multiset[simp] declare multiset.count_inverse[simp] lemma rel_mset_Plus: assumes ab: "R a b" and MN: "rel_mset R M N" shows "rel_mset R (add_mset a M) (add_mset b N)" proof - have "\ya. add_mset a (image_mset fst y) = image_mset fst ya \ add_mset b (image_mset snd y) = image_mset snd ya \ set_mset ya \ {(x, y). R x y}" if "R a b" and "set_mset y \ {(x, y). 
R x y}" for y using that by (intro exI[of _ "add_mset (a,b) y"]) auto thus ?thesis using assms unfolding multiset.rel_compp_Grp Grp_def by blast qed lemma rel_mset'_imp_rel_mset: "rel_mset' R M N \ rel_mset R M N" by (induct rule: rel_mset'.induct) (auto simp: rel_mset_Zero rel_mset_Plus) lemma rel_mset_size: "rel_mset R M N \ size M = size N" unfolding multiset.rel_compp_Grp Grp_def by auto lemma rel_mset_Zero_iff [simp]: shows "rel_mset rel {#} Y \ Y = {#}" and "rel_mset rel X {#} \ X = {#}" by (auto simp add: rel_mset_Zero dest: rel_mset_size) lemma multiset_induct2[case_names empty addL addR]: assumes empty: "P {#} {#}" and addL: "\a M N. P M N \ P (add_mset a M) N" and addR: "\a M N. P M N \ P M (add_mset a N)" shows "P M N" apply(induct N rule: multiset_induct) apply(induct M rule: multiset_induct, rule empty, erule addL) apply(induct M rule: multiset_induct, erule addR, erule addR) done lemma multiset_induct2_size[consumes 1, case_names empty add]: assumes c: "size M = size N" and empty: "P {#} {#}" and add: "\a b M N a b. P M N \ P (add_mset a M) (add_mset b N)" shows "P M N" using c proof (induct M arbitrary: N rule: measure_induct_rule[of size]) case (less M) show ?case proof(cases "M = {#}") case True hence "N = {#}" using less.prems by auto thus ?thesis using True empty by auto next case False then obtain M1 a where M: "M = add_mset a M1" by (metis multi_nonempty_split) have "N \ {#}" using False less.prems by auto then obtain N1 b where N: "N = add_mset b N1" by (metis multi_nonempty_split) have "size M1 = size N1" using less.prems unfolding M N by auto thus ?thesis using M N less.hyps add by auto qed qed lemma msed_map_invL: assumes "image_mset f (add_mset a M) = N" shows "\N1. N = add_mset (f a) N1 \ image_mset f M = N1" proof - have "f a \# N" using assms multiset.set_map[of f "add_mset a M"] by auto then obtain N1 where N: "N = add_mset (f a) N1" using multi_member_split by metis have "image_mset f M = N1" using assms unfolding N by simp thus ?thesis using N by blast qed lemma msed_map_invR: assumes "image_mset f M = add_mset b N" shows "\M1 a. M = add_mset a M1 \ f a = b \ image_mset f M1 = N" proof - obtain a where a: "a \# M" and fa: "f a = b" using multiset.set_map[of f M] unfolding assms by (metis image_iff union_single_eq_member) then obtain M1 where M: "M = add_mset a M1" using multi_member_split by metis have "image_mset f M1 = N" using assms unfolding M fa[symmetric] by simp thus ?thesis using M fa by blast qed lemma msed_rel_invL: assumes "rel_mset R (add_mset a M) N" shows "\N1 b. N = add_mset b N1 \ R a b \ rel_mset R M N1" proof - obtain K where KM: "image_mset fst K = add_mset a M" and KN: "image_mset snd K = N" and sK: "set_mset K \ {(a, b). R a b}" using assms unfolding multiset.rel_compp_Grp Grp_def by auto obtain K1 ab where K: "K = add_mset ab K1" and a: "fst ab = a" and K1M: "image_mset fst K1 = M" using msed_map_invR[OF KM] by auto obtain N1 where N: "N = add_mset (snd ab) N1" and K1N1: "image_mset snd K1 = N1" using msed_map_invL[OF KN[unfolded K]] by auto have Rab: "R a (snd ab)" using sK a unfolding K by auto have "rel_mset R M N1" using sK K1M K1N1 unfolding K multiset.rel_compp_Grp Grp_def by auto thus ?thesis using N Rab by auto qed lemma msed_rel_invR: assumes "rel_mset R M (add_mset b N)" shows "\M1 a. M = add_mset a M1 \ R a b \ rel_mset R M1 N" proof - obtain K where KN: "image_mset snd K = add_mset b N" and KM: "image_mset fst K = M" and sK: "set_mset K \ {(a, b). 
R a b}" using assms unfolding multiset.rel_compp_Grp Grp_def by auto obtain K1 ab where K: "K = add_mset ab K1" and b: "snd ab = b" and K1N: "image_mset snd K1 = N" using msed_map_invR[OF KN] by auto obtain M1 where M: "M = add_mset (fst ab) M1" and K1M1: "image_mset fst K1 = M1" using msed_map_invL[OF KM[unfolded K]] by auto have Rab: "R (fst ab) b" using sK b unfolding K by auto have "rel_mset R M1 N" using sK K1N K1M1 unfolding K multiset.rel_compp_Grp Grp_def by auto thus ?thesis using M Rab by auto qed lemma rel_mset_imp_rel_mset': assumes "rel_mset R M N" shows "rel_mset' R M N" using assms proof(induct M arbitrary: N rule: measure_induct_rule[of size]) case (less M) have c: "size M = size N" using rel_mset_size[OF less.prems] . show ?case proof(cases "M = {#}") case True hence "N = {#}" using c by simp thus ?thesis using True rel_mset'.Zero by auto next case False then obtain M1 a where M: "M = add_mset a M1" by (metis multi_nonempty_split) obtain N1 b where N: "N = add_mset b N1" and R: "R a b" and ms: "rel_mset R M1 N1" using msed_rel_invL[OF less.prems[unfolded M]] by auto have "rel_mset' R M1 N1" using less.hyps[of M1 N1] ms unfolding M by simp thus ?thesis using rel_mset'.Plus[of R a b, OF R] unfolding M N by simp qed qed lemma rel_mset_rel_mset': "rel_mset R M N = rel_mset' R M N" using rel_mset_imp_rel_mset' rel_mset'_imp_rel_mset by auto text \The main end product for \<^const>\rel_mset\: inductive characterization:\ lemmas rel_mset_induct[case_names empty add, induct pred: rel_mset] = rel_mset'.induct[unfolded rel_mset_rel_mset'[symmetric]] subsection \Size setup\ lemma size_multiset_o_map: "size_multiset g \ image_mset f = size_multiset (g \ f)" apply (rule ext) subgoal for x by (induct x) auto done setup \ BNF_LFP_Size.register_size_global \<^type_name>\multiset\ \<^const_name>\size_multiset\ @{thm size_multiset_overloaded_def} @{thms size_multiset_empty size_multiset_single size_multiset_union size_empty size_single size_union} @{thms size_multiset_o_map} \ subsection \Lemmas about Size\ lemma size_mset_SucE: "size A = Suc n \ (\a B. 
A = {#a#} + B \ size B = n \ P) \ P" by (cases A) (auto simp add: ac_simps) lemma size_Suc_Diff1: "x \# M \ Suc (size (M - {#x#})) = size M" using arg_cong[OF insert_DiffM, of _ _ size] by simp lemma size_Diff_singleton: "x \# M \ size (M - {#x#}) = size M - 1" by (simp flip: size_Suc_Diff1) lemma size_Diff_singleton_if: "size (A - {#x#}) = (if x \# A then size A - 1 else size A)" by (simp add: diff_single_trivial size_Diff_singleton) lemma size_Un_Int: "size A + size B = size (A \# B) + size (A \# B)" by (metis inter_subset_eq_union size_union subset_mset.diff_add union_diff_inter_eq_sup) lemma size_Un_disjoint: "A \# B = {#} \ size (A \# B) = size A + size B" using size_Un_Int[of A B] by simp lemma size_Diff_subset_Int: "size (M - M') = size M - size (M \# M')" by (metis diff_intersect_left_idem size_Diff_submset subset_mset.inf_le1) lemma diff_size_le_size_Diff: "size (M :: _ multiset) - size M' \ size (M - M')" by (simp add: diff_le_mono2 size_Diff_subset_Int size_mset_mono) lemma size_Diff1_less: "x\# M \ size (M - {#x#}) < size M" by (rule Suc_less_SucD) (simp add: size_Suc_Diff1) lemma size_Diff2_less: "x\# M \ y\# M \ size (M - {#x#} - {#y#}) < size M" by (metis less_imp_diff_less size_Diff1_less size_Diff_subset_Int) lemma size_Diff1_le: "size (M - {#x#}) \ size M" by (cases "x \# M") (simp_all add: size_Diff1_less less_imp_le diff_single_trivial) lemma size_psubset: "M \# M' \ size M < size M' \ M \# M'" using less_irrefl subset_mset_def by blast hide_const (open) wcount end diff --git a/src/HOL/Library/Multiset_Order.thy b/src/HOL/Library/Multiset_Order.thy --- a/src/HOL/Library/Multiset_Order.thy +++ b/src/HOL/Library/Multiset_Order.thy @@ -1,426 +1,482 @@ (* Title: HOL/Library/Multiset_Order.thy Author: Dmitriy Traytel, TU Muenchen Author: Jasmin Blanchette, Inria, LORIA, MPII *) section \More Theorems about the Multiset Order\ theory Multiset_Order imports Multiset begin subsection \Alternative Characterizations\ +subsubsection \The Dershowitz--Manna Ordering\ + +definition multp\<^sub>D\<^sub>M where + "multp\<^sub>D\<^sub>M r M N \ + (\X Y. X \ {#} \ X \# N \ M = (N - X) + Y \ (\k. k \# Y \ (\a. a \# X \ r k a)))" + +lemma multp\<^sub>D\<^sub>M_imp_multp: + "multp\<^sub>D\<^sub>M r M N \ multp r M N" +proof - + assume "multp\<^sub>D\<^sub>M r M N" + then obtain X Y where + "X \ {#}" and "X \# N" and "M = N - X + Y" and "\k. k \# Y \ (\a. a \# X \ r k a)" + unfolding multp\<^sub>D\<^sub>M_def by blast + then have "multp r (N - X + Y) (N - X + X)" + by (intro one_step_implies_multp) (auto simp: Bex_def trans_def) + with \M = N - X + Y\ \X \# N\ show "multp r M N" + by (metis subset_mset.diff_add) +qed + +subsubsection \The Huet--Oppen Ordering\ + +definition multp\<^sub>H\<^sub>O where + "multp\<^sub>H\<^sub>O r M N \ M \ N \ (\y. count N y < count M y \ (\x. r y x \ count M x < count N x))" + +lemma multp_imp_multp\<^sub>H\<^sub>O: + assumes "asymp r" and "transp r" + shows "multp r M N \ multp\<^sub>H\<^sub>O r M N" + unfolding multp_def mult_def +proof (induction rule: trancl_induct) + case (base P) + then show ?case + using \asymp r\ + by (auto elim!: mult1_lessE simp: count_eq_zero_iff multp\<^sub>H\<^sub>O_def split: if_splits + dest!: Suc_lessD) +next + case (step N P) + from step(3) have "M \ N" and + **: "\y. count N y < count M y \ (\x. r y x \ count M x < count N x)" + by (simp_all add: multp\<^sub>H\<^sub>O_def) + from step(2) obtain M0 a K where + *: "P = add_mset a M0" "N = M0 + K" "a \# K" "\b. 
b \# K \ r b a" + using \asymp r\ by (auto elim: mult1_lessE) + from \M \ N\ ** *(1,2,3) have "M \ P" + using *(4) \asymp r\ + by (metis asymp.cases add_cancel_right_right add_diff_cancel_left' add_mset_add_single count_inI + count_union diff_diff_add_mset diff_single_trivial in_diff_count multi_member_last) + moreover + { assume "count P a \ count M a" + with \a \# K\ have "count N a < count M a" unfolding *(1,2) + by (auto simp add: not_in_iff) + with ** obtain z where z: "r a z" "count M z < count N z" + by blast + with * have "count N z \ count P z" + using \asymp r\ + by (metis add_diff_cancel_left' add_mset_add_single asymp.cases diff_diff_add_mset + diff_single_trivial in_diff_count not_le_imp_less) + with z have "\z. r a z \ count M z < count P z" by auto + } note count_a = this + { fix y + assume count_y: "count P y < count M y" + have "\x. r y x \ count M x < count P x" + proof (cases "y = a") + case True + with count_y count_a show ?thesis by auto + next + case False + show ?thesis + proof (cases "y \# K") + case True + with *(4) have "r y a" by simp + then show ?thesis + by (cases "count P a \ count M a") (auto dest: count_a intro: \transp r\[THEN transpD]) + next + case False + with \y \ a\ have "count P y = count N y" unfolding *(1,2) + by (simp add: not_in_iff) + with count_y ** obtain z where z: "r y z" "count M z < count N z" by auto + show ?thesis + proof (cases "z \# K") + case True + with *(4) have "r z a" by simp + with z(1) show ?thesis + by (cases "count P a \ count M a") (auto dest!: count_a intro: \transp r\[THEN transpD]) + next + case False + with \a \# K\ have "count N z \ count P z" unfolding * + by (auto simp add: not_in_iff) + with z show ?thesis by auto + qed + qed + qed + } + ultimately show ?case unfolding multp\<^sub>H\<^sub>O_def by blast +qed + +lemma multp\<^sub>H\<^sub>O_imp_multp\<^sub>D\<^sub>M: "multp\<^sub>H\<^sub>O r M N \ multp\<^sub>D\<^sub>M r M N" +unfolding multp\<^sub>D\<^sub>M_def +proof (intro iffI exI conjI) + assume "multp\<^sub>H\<^sub>O r M N" + then obtain z where z: "count M z < count N z" + unfolding multp\<^sub>H\<^sub>O_def by (auto simp: multiset_eq_iff nat_neq_iff) + define X where "X = N - M" + define Y where "Y = M - N" + from z show "X \ {#}" unfolding X_def by (auto simp: multiset_eq_iff not_less_eq_eq Suc_le_eq) + from z show "X \# N" unfolding X_def by auto + show "M = (N - X) + Y" unfolding X_def Y_def multiset_eq_iff count_union count_diff by force + show "\k. k \# Y \ (\a. a \# X \ r k a)" + proof (intro allI impI) + fix k + assume "k \# Y" + then have "count N k < count M k" unfolding Y_def + by (auto simp add: in_diff_count) + with \multp\<^sub>H\<^sub>O r M N\ obtain a where "r k a" and "count M a < count N a" + unfolding multp\<^sub>H\<^sub>O_def by blast + then show "\a. a \# X \ r k a" unfolding X_def + by (auto simp add: in_diff_count) + qed +qed + +lemma multp_eq_multp\<^sub>D\<^sub>M: "asymp r \ transp r \ multp r = multp\<^sub>D\<^sub>M r" + using multp\<^sub>D\<^sub>M_imp_multp multp_imp_multp\<^sub>H\<^sub>O[THEN multp\<^sub>H\<^sub>O_imp_multp\<^sub>D\<^sub>M] + by blast + +lemma multp_eq_multp\<^sub>H\<^sub>O: "asymp r \ transp r \ multp r = multp\<^sub>H\<^sub>O r" + using multp\<^sub>H\<^sub>O_imp_multp\<^sub>D\<^sub>M[THEN multp\<^sub>D\<^sub>M_imp_multp] multp_imp_multp\<^sub>H\<^sub>O + by blast + +subsubsection \Properties of Preorders\ + context preorder begin lemma order_mult: "class.order (\M N. (M, N) \ mult {(x, y). x < y} \ M = N) (\M N. (M, N) \ mult {(x, y). 
x < y})" (is "class.order ?le ?less") proof - have irrefl: "\M :: 'a multiset. \ ?less M M" proof fix M :: "'a multiset" have "trans {(x'::'a, x). x' < x}" by (rule transI) (blast intro: less_trans) moreover assume "(M, M) \ mult {(x, y). x < y}" ultimately have "\I J K. M = I + J \ M = I + K \ J \ {#} \ (\k\set_mset K. \j\set_mset J. (k, j) \ {(x, y). x < y})" by (rule mult_implies_one_step) then obtain I J K where "M = I + J" and "M = I + K" and "J \ {#}" and "(\k\set_mset K. \j\set_mset J. (k, j) \ {(x, y). x < y})" by blast then have aux1: "K \ {#}" and aux2: "\k\set_mset K. \j\set_mset K. k < j" by auto have "finite (set_mset K)" by simp moreover note aux2 ultimately have "set_mset K = {}" by (induct rule: finite_induct) (simp, metis (mono_tags) insert_absorb insert_iff insert_not_empty less_irrefl less_trans) with aux1 show False by simp qed have trans: "\K M N :: 'a multiset. ?less K M \ ?less M N \ ?less K N" unfolding mult_def by (blast intro: trancl_trans) show "class.order ?le ?less" by standard (auto simp add: less_eq_multiset_def irrefl dest: trans) qed text \The Dershowitz--Manna ordering:\ definition less_multiset\<^sub>D\<^sub>M where "less_multiset\<^sub>D\<^sub>M M N \ (\X Y. X \ {#} \ X \# N \ M = (N - X) + Y \ (\k. k \# Y \ (\a. a \# X \ k < a)))" text \The Huet--Oppen ordering:\ definition less_multiset\<^sub>H\<^sub>O where "less_multiset\<^sub>H\<^sub>O M N \ M \ N \ (\y. count N y < count M y \ (\x. y < x \ count M x < count N x))" lemma mult_imp_less_multiset\<^sub>H\<^sub>O: "(M, N) \ mult {(x, y). x < y} \ less_multiset\<^sub>H\<^sub>O M N" -proof (unfold mult_def, induct rule: trancl_induct) - case (base P) - then show ?case - by (auto elim!: mult1_lessE simp add: count_eq_zero_iff less_multiset\<^sub>H\<^sub>O_def split: if_splits dest!: Suc_lessD) -next - case (step N P) - from step(3) have "M \ N" and - **: "\y. count N y < count M y \ (\x>y. count M x < count N x)" - by (simp_all add: less_multiset\<^sub>H\<^sub>O_def) - from step(2) obtain M0 a K where - *: "P = add_mset a M0" "N = M0 + K" "a \# K" "\b. b \# K \ b < a" - by (blast elim: mult1_lessE) - from \M \ N\ ** *(1,2,3) have "M \ P" by (force dest: *(4) elim!: less_asym split: if_splits ) - moreover - { assume "count P a \ count M a" - with \a \# K\ have "count N a < count M a" unfolding *(1,2) - by (auto simp add: not_in_iff) - with ** obtain z where z: "z > a" "count M z < count N z" - by blast - with * have "count N z \ count P z" - by (auto elim: less_asym intro: count_inI) - with z have "\z > a. count M z < count P z" by auto - } note count_a = this - { fix y - assume count_y: "count P y < count M y" - have "\x>y. 
count M x < count P x" - proof (cases "y = a") - case True - with count_y count_a show ?thesis by auto - next - case False - show ?thesis - proof (cases "y \# K") - case True - with *(4) have "y < a" by simp - then show ?thesis by (cases "count P a \ count M a") (auto dest: count_a intro: less_trans) - next - case False - with \y \ a\ have "count P y = count N y" unfolding *(1,2) - by (simp add: not_in_iff) - with count_y ** obtain z where z: "z > y" "count M z < count N z" by auto - show ?thesis - proof (cases "z \# K") - case True - with *(4) have "z < a" by simp - with z(1) show ?thesis - by (cases "count P a \ count M a") (auto dest!: count_a intro: less_trans) - next - case False - with \a \# K\ have "count N z \ count P z" unfolding * - by (auto simp add: not_in_iff) - with z show ?thesis by auto - qed - qed - qed - } - ultimately show ?case unfolding less_multiset\<^sub>H\<^sub>O_def by blast -qed + unfolding multp_def[of "(<)", symmetric] + using multp_imp_multp\<^sub>H\<^sub>O[of "(<)"] + by (simp add: less_multiset\<^sub>H\<^sub>O_def multp\<^sub>H\<^sub>O_def) lemma less_multiset\<^sub>D\<^sub>M_imp_mult: "less_multiset\<^sub>D\<^sub>M M N \ (M, N) \ mult {(x, y). x < y}" -proof - - assume "less_multiset\<^sub>D\<^sub>M M N" - then obtain X Y where - "X \ {#}" and "X \# N" and "M = N - X + Y" and "\k. k \# Y \ (\a. a \# X \ k < a)" - unfolding less_multiset\<^sub>D\<^sub>M_def by blast - then have "(N - X + Y, N - X + X) \ mult {(x, y). x < y}" - by (intro one_step_implies_mult) (auto simp: Bex_def trans_def) - with \M = N - X + Y\ \X \# N\ show "(M, N) \ mult {(x, y). x < y}" - by (metis subset_mset.diff_add) -qed + unfolding multp_def[of "(<)", symmetric] + by (rule multp\<^sub>D\<^sub>M_imp_multp[of "(<)" M N]) (simp add: less_multiset\<^sub>D\<^sub>M_def multp\<^sub>D\<^sub>M_def) lemma less_multiset\<^sub>H\<^sub>O_imp_less_multiset\<^sub>D\<^sub>M: "less_multiset\<^sub>H\<^sub>O M N \ less_multiset\<^sub>D\<^sub>M M N" -unfolding less_multiset\<^sub>D\<^sub>M_def -proof (intro iffI exI conjI) - assume "less_multiset\<^sub>H\<^sub>O M N" - then obtain z where z: "count M z < count N z" - unfolding less_multiset\<^sub>H\<^sub>O_def by (auto simp: multiset_eq_iff nat_neq_iff) - define X where "X = N - M" - define Y where "Y = M - N" - from z show "X \ {#}" unfolding X_def by (auto simp: multiset_eq_iff not_less_eq_eq Suc_le_eq) - from z show "X \# N" unfolding X_def by auto - show "M = (N - X) + Y" unfolding X_def Y_def multiset_eq_iff count_union count_diff by force - show "\k. k \# Y \ (\a. a \# X \ k < a)" - proof (intro allI impI) - fix k - assume "k \# Y" - then have "count N k < count M k" unfolding Y_def - by (auto simp add: in_diff_count) - with \less_multiset\<^sub>H\<^sub>O M N\ obtain a where "k < a" and "count M a < count N a" - unfolding less_multiset\<^sub>H\<^sub>O_def by blast - then show "\a. a \# X \ k < a" unfolding X_def - by (auto simp add: in_diff_count) - qed -qed + unfolding less_multiset\<^sub>D\<^sub>M_def less_multiset\<^sub>H\<^sub>O_def + unfolding multp\<^sub>D\<^sub>M_def[symmetric] multp\<^sub>H\<^sub>O_def[symmetric] + by (rule multp\<^sub>H\<^sub>O_imp_multp\<^sub>D\<^sub>M) lemma mult_less_multiset\<^sub>D\<^sub>M: "(M, N) \ mult {(x, y). 
x < y} \ less_multiset\<^sub>D\<^sub>M M N" - by (metis less_multiset\<^sub>D\<^sub>M_imp_mult less_multiset\<^sub>H\<^sub>O_imp_less_multiset\<^sub>D\<^sub>M mult_imp_less_multiset\<^sub>H\<^sub>O) + unfolding multp_def[of "(<)", symmetric] + using multp_eq_multp\<^sub>D\<^sub>M[of "(<)", simplified] + by (simp add: multp\<^sub>D\<^sub>M_def less_multiset\<^sub>D\<^sub>M_def) lemma mult_less_multiset\<^sub>H\<^sub>O: "(M, N) \ mult {(x, y). x < y} \ less_multiset\<^sub>H\<^sub>O M N" - by (metis less_multiset\<^sub>D\<^sub>M_imp_mult less_multiset\<^sub>H\<^sub>O_imp_less_multiset\<^sub>D\<^sub>M mult_imp_less_multiset\<^sub>H\<^sub>O) + unfolding multp_def[of "(<)", symmetric] + using multp_eq_multp\<^sub>H\<^sub>O[of "(<)", simplified] + by (simp add: multp\<^sub>H\<^sub>O_def less_multiset\<^sub>H\<^sub>O_def) lemmas mult\<^sub>D\<^sub>M = mult_less_multiset\<^sub>D\<^sub>M[unfolded less_multiset\<^sub>D\<^sub>M_def] lemmas mult\<^sub>H\<^sub>O = mult_less_multiset\<^sub>H\<^sub>O[unfolded less_multiset\<^sub>H\<^sub>O_def] end lemma less_multiset_less_multiset\<^sub>H\<^sub>O: "M < N \ less_multiset\<^sub>H\<^sub>O M N" - unfolding less_multiset_def mult\<^sub>H\<^sub>O less_multiset\<^sub>H\<^sub>O_def .. + unfolding less_multiset_def multp_def mult\<^sub>H\<^sub>O less_multiset\<^sub>H\<^sub>O_def .. -lemmas less_multiset\<^sub>D\<^sub>M = mult\<^sub>D\<^sub>M[folded less_multiset_def] -lemmas less_multiset\<^sub>H\<^sub>O = mult\<^sub>H\<^sub>O[folded less_multiset_def] +lemma less_multiset\<^sub>D\<^sub>M: + "M < N \ (\X Y. X \ {#} \ X \# N \ M = N - X + Y \ (\k. k \# Y \ (\a. a \# X \ k < a)))" + by (rule mult\<^sub>D\<^sub>M[folded multp_def less_multiset_def]) + +lemma less_multiset\<^sub>H\<^sub>O: + "M < N \ M \ N \ (\y. count N y < count M y \ (\x>y. count M x < count N x))" + by (rule mult\<^sub>H\<^sub>O[folded multp_def less_multiset_def]) lemma subset_eq_imp_le_multiset: shows "M \# N \ M \ N" unfolding less_eq_multiset_def less_multiset\<^sub>H\<^sub>O by (simp add: less_le_not_le subseteq_mset_def) (* FIXME: "le" should be "less" in this and other names *) lemma le_multiset_right_total: "M < add_mset x M" unfolding less_eq_multiset_def less_multiset\<^sub>H\<^sub>O by simp lemma less_eq_multiset_empty_left[simp]: shows "{#} \ M" by (simp add: subset_eq_imp_le_multiset) lemma ex_gt_imp_less_multiset: "(\y. y \# N \ (\x. x \# M \ x < y)) \ M < N" unfolding less_multiset\<^sub>H\<^sub>O by (metis count_eq_zero_iff count_greater_zero_iff less_le_not_le) lemma less_eq_multiset_empty_right[simp]: "M \ {#} \ \ M \ {#}" by (metis less_eq_multiset_empty_left antisym) (* FIXME: "le" should be "less" in this and other names *) lemma le_multiset_empty_left[simp]: "M \ {#} \ {#} < M" by (simp add: less_multiset\<^sub>H\<^sub>O) (* FIXME: "le" should be "less" in this and other names *) lemma le_multiset_empty_right[simp]: "\ M < {#}" - using subset_mset.le_zero_eq less_multiset\<^sub>D\<^sub>M by blast + using subset_mset.le_zero_eq less_multiset_def multp_def less_multiset\<^sub>D\<^sub>M by blast (* FIXME: "le" should be "less" in this and other names *) lemma union_le_diff_plus: "P \# M \ N < P \ M - P + N < M" by (drule subset_mset.diff_add[symmetric]) (metis union_le_mono2) instantiation multiset :: (preorder) ordered_ab_semigroup_monoid_add_imp_le begin lemma less_eq_multiset\<^sub>H\<^sub>O: "M \ N \ (\y. count N y < count M y \ (\x. 
y < x \ count M x < count N x))" by (auto simp: less_eq_multiset_def less_multiset\<^sub>H\<^sub>O) instance by standard (auto simp: less_eq_multiset\<^sub>H\<^sub>O) lemma fixes M N :: "'a multiset" shows less_eq_multiset_plus_left: "N \ (M + N)" and less_eq_multiset_plus_right: "M \ (M + N)" by simp_all lemma fixes M N :: "'a multiset" shows le_multiset_plus_left_nonempty: "M \ {#} \ N < M + N" and le_multiset_plus_right_nonempty: "N \ {#} \ M < M + N" by simp_all end lemma all_lt_Max_imp_lt_mset: "N \ {#} \ (\a \# M. a < Max (set_mset N)) \ M < N" by (meson Max_in[OF finite_set_mset] ex_gt_imp_less_multiset set_mset_eq_empty_iff) lemma lt_imp_ex_count_lt: "M < N \ \y. count M y < count N y" by (meson less_eq_multiset\<^sub>H\<^sub>O less_le_not_le) lemma subset_imp_less_mset: "A \# B \ A < B" by (simp add: order.not_eq_order_implies_strict subset_eq_imp_le_multiset) lemma image_mset_strict_mono: assumes mono_f: "\x \ set_mset M. \y \ set_mset N. x < y \ f x < f y" and less: "M < N" shows "image_mset f M < image_mset f N" proof - obtain Y X where y_nemp: "Y \ {#}" and y_sub_N: "Y \# N" and M_eq: "M = N - Y + X" and ex_y: "\x. x \# X \ (\y. y \# Y \ x < y)" using less[unfolded less_multiset\<^sub>D\<^sub>M] by blast have x_sub_M: "X \# M" using M_eq by simp let ?fY = "image_mset f Y" let ?fX = "image_mset f X" show ?thesis unfolding less_multiset\<^sub>D\<^sub>M proof (intro exI conjI) show "image_mset f M = image_mset f N - ?fY + ?fX" using M_eq[THEN arg_cong, of "image_mset f"] y_sub_N by (metis image_mset_Diff image_mset_union) next obtain y where y: "\x. x \# X \ y x \# Y \ x < y x" using ex_y by moura show "\fx. fx \# ?fX \ (\fy. fy \# ?fY \ fx < fy)" proof (intro allI impI) fix fx assume "fx \# ?fX" then obtain x where fx: "fx = f x" and x_in: "x \# X" by auto hence y_in: "y x \# Y" and y_gt: "x < y x" using y[rule_format, OF x_in] by blast+ hence "f (y x) \# ?fY \ f x < f (y x)" using mono_f y_sub_N x_sub_M x_in by (metis image_eqI in_image_mset mset_subset_eqD) thus "\fy. fy \# ?fY \ fx < fy" unfolding fx by auto qed qed (auto simp: y_nemp y_sub_N image_mset_subseteq_mono) qed lemma image_mset_mono: assumes mono_f: "\x \ set_mset M. \y \ set_mset N. x < y \ f x < f y" and less: "M \ N" shows "image_mset f M \ image_mset f N" by (metis eq_iff image_mset_strict_mono less less_imp_le mono_f order.not_eq_order_implies_strict) lemma mset_lt_single_right_iff[simp]: "M < {#y#} \ (\x \# M. x < y)" for y :: "'a::linorder" proof (rule iffI) assume M_lt_y: "M < {#y#}" show "\x \# M. x < y" proof fix x assume x_in: "x \# M" hence M: "M - {#x#} + {#x#} = M" by (meson insert_DiffM2) hence "\ {#x#} < {#y#} \ x < y" using x_in M_lt_y by (metis diff_single_eq_union le_multiset_empty_left less_add_same_cancel2 mset_le_trans) also have "\ {#y#} < M" using M_lt_y mset_le_not_sym by blast ultimately show "x < y" by (metis (no_types) Max_ge all_lt_Max_imp_lt_mset empty_iff finite_set_mset insertE less_le_trans linorder_less_linear mset_le_not_sym set_mset_add_mset_insert set_mset_eq_empty_iff x_in) qed next assume y_max: "\x \# M. x < y" show "M < {#y#}" by (rule all_lt_Max_imp_lt_mset) (auto intro!: y_max) qed lemma mset_le_single_right_iff[simp]: "M \ {#y#} \ M = {#y#} \ (\x \# M. 
x < y)" for y :: "'a::linorder" by (meson less_eq_multiset_def mset_lt_single_right_iff) subsection \Simprocs\ lemma mset_le_add_iff1: "j \ (i::nat) \ (repeat_mset i u + m \ repeat_mset j u + n) = (repeat_mset (i-j) u + m \ n)" proof - assume "j \ i" then have "j + (i - j) = i" using le_add_diff_inverse by blast then show ?thesis by (metis (no_types) add_le_cancel_left left_add_mult_distrib_mset) qed lemma mset_le_add_iff2: "i \ (j::nat) \ (repeat_mset i u + m \ repeat_mset j u + n) = (m \ repeat_mset (j-i) u + n)" proof - assume "i \ j" then have "i + (j - i) = j" using le_add_diff_inverse by blast then show ?thesis by (metis (no_types) add_le_cancel_left left_add_mult_distrib_mset) qed simproc_setup msetless_cancel ("(l::'a::preorder multiset) + m < n" | "(l::'a multiset) < m + n" | "add_mset a m < n" | "m < add_mset a n" | "replicate_mset p a < n" | "m < replicate_mset p a" | "repeat_mset p m < n" | "m < repeat_mset p n") = \fn phi => Cancel_Simprocs.less_cancel\ simproc_setup msetle_cancel ("(l::'a::preorder multiset) + m \ n" | "(l::'a multiset) \ m + n" | "add_mset a m \ n" | "m \ add_mset a n" | "replicate_mset p a \ n" | "m \ replicate_mset p a" | "repeat_mset p m \ n" | "m \ repeat_mset p n") = \fn phi => Cancel_Simprocs.less_eq_cancel\ subsection \Additional facts and instantiations\ lemma ex_gt_count_imp_le_multiset: "(\y :: 'a :: order. y \# M + N \ y \ x) \ count M x < count N x \ M < N" unfolding less_multiset\<^sub>H\<^sub>O by (metis count_greater_zero_iff le_imp_less_or_eq less_imp_not_less not_gr_zero union_iff) lemma mset_lt_single_iff[iff]: "{#x#} < {#y#} \ x < y" unfolding less_multiset\<^sub>H\<^sub>O by simp lemma mset_le_single_iff[iff]: "{#x#} \ {#y#} \ x \ y" for x y :: "'a::order" unfolding less_eq_multiset\<^sub>H\<^sub>O by force instance multiset :: (linorder) linordered_cancel_ab_semigroup_add by standard (metis less_eq_multiset\<^sub>H\<^sub>O not_less_iff_gr_or_eq) lemma less_eq_multiset_total: fixes M N :: "'a :: linorder multiset" shows "\ M \ N \ N \ M" by simp instantiation multiset :: (wellorder) wellorder begin lemma wf_less_multiset: "wf {(M :: 'a multiset, N). M < N}" - unfolding less_multiset_def by (auto intro: wf_mult wf) + unfolding less_multiset_def multp_def by (auto intro: wf_mult wf) -instance by standard (metis less_multiset_def wf wf_def wf_mult) +instance by standard (metis less_multiset_def multp_def wf wf_def wf_mult) end instantiation multiset :: (preorder) order_bot begin definition bot_multiset :: "'a multiset" where "bot_multiset = {#}" instance by standard (simp add: bot_multiset_def) end instance multiset :: (preorder) no_top proof standard fix x :: "'a multiset" obtain a :: 'a where True by simp have "x < x + (x + {#a#})" by simp then show "\y. x < y" by blast qed instance multiset :: (preorder) ordered_cancel_comm_monoid_add by standard instantiation multiset :: (linorder) distrib_lattice begin definition inf_multiset :: "'a multiset \ 'a multiset \ 'a multiset" where "inf_multiset A B = (if A < B then A else B)" definition sup_multiset :: "'a multiset \ 'a multiset \ 'a multiset" where "sup_multiset A B = (if B > A then B else A)" instance by intro_classes (auto simp: inf_multiset_def sup_multiset_def) end end diff --git a/src/HOL/Library/Rewrite.thy b/src/HOL/Library/Rewrite.thy --- a/src/HOL/Library/Rewrite.thy +++ b/src/HOL/Library/Rewrite.thy @@ -1,28 +1,30 @@ (* Title: HOL/Library/Rewrite.thy Author: Christoph Traut, Lars Noschinski, TU Muenchen Proof method "rewrite" with support for subterm-selection based on patterns. 
+ +Documentation: https://arxiv.org/abs/2111.04082 *) theory Rewrite imports Main begin consts rewrite_HOLE :: "'a::{}" ("\") lemma eta_expand: fixes f :: "'a::{} \ 'b::{}" shows "f \ \x. f x" . lemma imp_cong_eq: "(PROP A \ (PROP B \ PROP C) \ (PROP B' \ PROP C')) \ ((PROP B \ PROP A \ PROP C) \ (PROP B' \ PROP A \ PROP C'))" apply (intro Pure.equal_intr_rule) apply (drule (1) cut_rl; drule Pure.equal_elim_rule1 Pure.equal_elim_rule2; assumption)+ apply (drule Pure.equal_elim_rule1 Pure.equal_elim_rule2; assumption)+ done ML_file \cconv.ML\ ML_file \rewrite.ML\ end diff --git a/src/HOL/List.thy b/src/HOL/List.thy --- a/src/HOL/List.thy +++ b/src/HOL/List.thy @@ -1,8319 +1,8323 @@ (* Title: HOL/List.thy Author: Tobias Nipkow; proofs tidied by LCP *) section \The datatype of finite lists\ theory List imports Sledgehammer Lifting_Set begin datatype (set: 'a) list = Nil ("[]") | Cons (hd: 'a) (tl: "'a list") (infixr "#" 65) for map: map rel: list_all2 pred: list_all where "tl [] = []" datatype_compat list lemma [case_names Nil Cons, cases type: list]: \ \for backward compatibility -- names of variables differ\ "(y = [] \ P) \ (\a list. y = a # list \ P) \ P" by (rule list.exhaust) lemma [case_names Nil Cons, induct type: list]: \ \for backward compatibility -- names of variables differ\ "P [] \ (\a list. P list \ P (a # list)) \ P list" by (rule list.induct) text \Compatibility:\ setup \Sign.mandatory_path "list"\ lemmas inducts = list.induct lemmas recs = list.rec lemmas cases = list.case setup \Sign.parent_path\ lemmas set_simps = list.set (* legacy *) syntax \ \list Enumeration\ "_list" :: "args => 'a list" ("[(_)]") translations "[x, xs]" == "x#[xs]" "[x]" == "x#[]" subsection \Basic list processing functions\ primrec (nonexhaustive) last :: "'a list \ 'a" where "last (x # xs) = (if xs = [] then x else last xs)" primrec butlast :: "'a list \ 'a list" where "butlast [] = []" | "butlast (x # xs) = (if xs = [] then [] else x # butlast xs)" lemma set_rec: "set xs = rec_list {} (\x _. insert x) xs" by (induct xs) auto definition coset :: "'a list \ 'a set" where [simp]: "coset xs = - set xs" primrec append :: "'a list \ 'a list \ 'a list" (infixr "@" 65) where append_Nil: "[] @ ys = ys" | append_Cons: "(x#xs) @ ys = x # xs @ ys" primrec rev :: "'a list \ 'a list" where "rev [] = []" | "rev (x # xs) = rev xs @ [x]" primrec filter:: "('a \ bool) \ 'a list \ 'a list" where "filter P [] = []" | "filter P (x # xs) = (if P x then x # filter P xs else filter P xs)" text \Special input syntax for filter:\ syntax (ASCII) "_filter" :: "[pttrn, 'a list, bool] => 'a list" ("(1[_<-_./ _])") syntax "_filter" :: "[pttrn, 'a list, bool] => 'a list" ("(1[_\_ ./ _])") translations "[x<-xs . P]" \ "CONST filter (\x. 
P) xs" primrec fold :: "('a \ 'b \ 'b) \ 'a list \ 'b \ 'b" where fold_Nil: "fold f [] = id" | fold_Cons: "fold f (x # xs) = fold f xs \ f x" primrec foldr :: "('a \ 'b \ 'b) \ 'a list \ 'b \ 'b" where foldr_Nil: "foldr f [] = id" | foldr_Cons: "foldr f (x # xs) = f x \ foldr f xs" primrec foldl :: "('b \ 'a \ 'b) \ 'b \ 'a list \ 'b" where foldl_Nil: "foldl f a [] = a" | foldl_Cons: "foldl f a (x # xs) = foldl f (f a x) xs" primrec concat:: "'a list list \ 'a list" where "concat [] = []" | "concat (x # xs) = x @ concat xs" primrec drop:: "nat \ 'a list \ 'a list" where drop_Nil: "drop n [] = []" | drop_Cons: "drop n (x # xs) = (case n of 0 \ x # xs | Suc m \ drop m xs)" \ \Warning: simpset does not contain this definition, but separate theorems for \n = 0\ and \n = Suc k\\ primrec take:: "nat \ 'a list \ 'a list" where take_Nil:"take n [] = []" | take_Cons: "take n (x # xs) = (case n of 0 \ [] | Suc m \ x # take m xs)" \ \Warning: simpset does not contain this definition, but separate theorems for \n = 0\ and \n = Suc k\\ primrec (nonexhaustive) nth :: "'a list => nat => 'a" (infixl "!" 100) where nth_Cons: "(x # xs) ! n = (case n of 0 \ x | Suc k \ xs ! k)" \ \Warning: simpset does not contain this definition, but separate theorems for \n = 0\ and \n = Suc k\\ primrec list_update :: "'a list \ nat \ 'a \ 'a list" where "list_update [] i v = []" | "list_update (x # xs) i v = (case i of 0 \ v # xs | Suc j \ x # list_update xs j v)" nonterminal lupdbinds and lupdbind syntax "_lupdbind":: "['a, 'a] => lupdbind" ("(2_ :=/ _)") "" :: "lupdbind => lupdbinds" ("_") "_lupdbinds" :: "[lupdbind, lupdbinds] => lupdbinds" ("_,/ _") "_LUpdate" :: "['a, lupdbinds] => 'a" ("_/[(_)]" [1000,0] 900) translations "_LUpdate xs (_lupdbinds b bs)" == "_LUpdate (_LUpdate xs b) bs" "xs[i:=x]" == "CONST list_update xs i x" primrec takeWhile :: "('a \ bool) \ 'a list \ 'a list" where "takeWhile P [] = []" | "takeWhile P (x # xs) = (if P x then x # takeWhile P xs else [])" primrec dropWhile :: "('a \ bool) \ 'a list \ 'a list" where "dropWhile P [] = []" | "dropWhile P (x # xs) = (if P x then dropWhile P xs else x # xs)" primrec zip :: "'a list \ 'b list \ ('a \ 'b) list" where "zip xs [] = []" | zip_Cons: "zip xs (y # ys) = (case xs of [] \ [] | z # zs \ (z, y) # zip zs ys)" \ \Warning: simpset does not contain this definition, but separate theorems for \xs = []\ and \xs = z # zs\\ abbreviation map2 :: "('a \ 'b \ 'c) \ 'a list \ 'b list \ 'c list" where "map2 f xs ys \ map (\(x,y). f x y) (zip xs ys)" primrec product :: "'a list \ 'b list \ ('a \ 'b) list" where "product [] _ = []" | "product (x#xs) ys = map (Pair x) ys @ product xs ys" hide_const (open) product primrec product_lists :: "'a list list \ 'a list list" where "product_lists [] = [[]]" | "product_lists (xs # xss) = concat (map (\x. map (Cons x) (product_lists xss)) xs)" primrec upt :: "nat \ nat \ nat list" ("(1[_.. j then [i.. 
'a list \ 'a list" where "insert x xs = (if x \ set xs then xs else x # xs)" definition union :: "'a list \ 'a list \ 'a list" where "union = fold insert" hide_const (open) insert union hide_fact (open) insert_def union_def primrec find :: "('a \ bool) \ 'a list \ 'a option" where "find _ [] = None" | "find P (x#xs) = (if P x then Some x else find P xs)" text \In the context of multisets, \count_list\ is equivalent to \<^term>\count \ mset\ and it it advisable to use the latter.\ primrec count_list :: "'a list \ 'a \ nat" where "count_list [] y = 0" | "count_list (x#xs) y = (if x=y then count_list xs y + 1 else count_list xs y)" definition "extract" :: "('a \ bool) \ 'a list \ ('a list * 'a * 'a list) option" where "extract P xs = (case dropWhile (Not \ P) xs of [] \ None | y#ys \ Some(takeWhile (Not \ P) xs, y, ys))" hide_const (open) "extract" primrec those :: "'a option list \ 'a list option" where "those [] = Some []" | "those (x # xs) = (case x of None \ None | Some y \ map_option (Cons y) (those xs))" primrec remove1 :: "'a \ 'a list \ 'a list" where "remove1 x [] = []" | "remove1 x (y # xs) = (if x = y then xs else y # remove1 x xs)" primrec removeAll :: "'a \ 'a list \ 'a list" where "removeAll x [] = []" | "removeAll x (y # xs) = (if x = y then removeAll x xs else y # removeAll x xs)" primrec distinct :: "'a list \ bool" where "distinct [] \ True" | "distinct (x # xs) \ x \ set xs \ distinct xs" fun successively :: "('a \ 'a \ bool) \ 'a list \ bool" where "successively P [] = True" | "successively P [x] = True" | "successively P (x # y # xs) = (P x y \ successively P (y#xs))" definition distinct_adj where "distinct_adj = successively (\)" primrec remdups :: "'a list \ 'a list" where "remdups [] = []" | "remdups (x # xs) = (if x \ set xs then remdups xs else x # remdups xs)" fun remdups_adj :: "'a list \ 'a list" where "remdups_adj [] = []" | "remdups_adj [x] = [x]" | "remdups_adj (x # y # xs) = (if x = y then remdups_adj (x # xs) else x # remdups_adj (y # xs))" primrec replicate :: "nat \ 'a \ 'a list" where replicate_0: "replicate 0 x = []" | replicate_Suc: "replicate (Suc n) x = x # replicate n x" text \ Function \size\ is overloaded for all datatypes. Users may refer to the list version as \length\.\ abbreviation length :: "'a list \ nat" where "length \ size" definition enumerate :: "nat \ 'a list \ (nat \ 'a) list" where enumerate_eq_zip: "enumerate n xs = zip [n.. 'a list" where "rotate1 [] = []" | "rotate1 (x # xs) = xs @ [x]" definition rotate :: "nat \ 'a list \ 'a list" where "rotate n = rotate1 ^^ n" definition nths :: "'a list => nat set => 'a list" where "nths xs A = map fst (filter (\p. snd p \ A) (zip xs [0.. 'a list list" where "subseqs [] = [[]]" | "subseqs (x#xs) = (let xss = subseqs xs in map (Cons x) xss @ xss)" primrec n_lists :: "nat \ 'a list \ 'a list list" where "n_lists 0 xs = [[]]" | "n_lists (Suc n) xs = concat (map (\ys. map (\y. y # ys) xs) (n_lists n xs))" hide_const (open) n_lists function splice :: "'a list \ 'a list \ 'a list" where "splice [] ys = ys" | "splice (x#xs) ys = x # splice ys xs" by pat_completeness auto termination by(relation "measure(\(xs,ys). 
size xs + size ys)") auto function shuffles where "shuffles [] ys = {ys}" | "shuffles xs [] = {xs}" | "shuffles (x # xs) (y # ys) = (#) x ` shuffles xs (y # ys) \ (#) y ` shuffles (x # xs) ys" by pat_completeness simp_all termination by lexicographic_order text\Use only if you cannot use \<^const>\Min\ instead:\ fun min_list :: "'a::ord list \ 'a" where "min_list (x # xs) = (case xs of [] \ x | _ \ min x (min_list xs))" text\Returns first minimum:\ fun arg_min_list :: "('a \ ('b::linorder)) \ 'a list \ 'a" where "arg_min_list f [x] = x" | "arg_min_list f (x#y#zs) = (let m = arg_min_list f (y#zs) in if f x \ f m then x else m)" text\ \begin{figure}[htbp] \fbox{ \begin{tabular}{l} @{lemma "[a,b]@[c,d] = [a,b,c,d]" by simp}\\ @{lemma "length [a,b,c] = 3" by simp}\\ @{lemma "set [a,b,c] = {a,b,c}" by simp}\\ @{lemma "map f [a,b,c] = [f a, f b, f c]" by simp}\\ @{lemma "rev [a,b,c] = [c,b,a]" by simp}\\ @{lemma "hd [a,b,c,d] = a" by simp}\\ @{lemma "tl [a,b,c,d] = [b,c,d]" by simp}\\ @{lemma "last [a,b,c,d] = d" by simp}\\ @{lemma "butlast [a,b,c,d] = [a,b,c]" by simp}\\ @{lemma[source] "filter (\n::nat. n<2) [0,2,1] = [0,1]" by simp}\\ @{lemma "concat [[a,b],[c,d,e],[],[f]] = [a,b,c,d,e,f]" by simp}\\ @{lemma "fold f [a,b,c] x = f c (f b (f a x))" by simp}\\ @{lemma "foldr f [a,b,c] x = f a (f b (f c x))" by simp}\\ @{lemma "foldl f x [a,b,c] = f (f (f x a) b) c" by simp}\\ @{lemma "successively (\) [True,False,True,False]" by simp}\\ @{lemma "zip [a,b,c] [x,y,z] = [(a,x),(b,y),(c,z)]" by simp}\\ @{lemma "zip [a,b] [x,y,z] = [(a,x),(b,y)]" by simp}\\ @{lemma "enumerate 3 [a,b,c] = [(3,a),(4,b),(5,c)]" by normalization}\\ @{lemma "List.product [a,b] [c,d] = [(a, c), (a, d), (b, c), (b, d)]" by simp}\\ @{lemma "product_lists [[a,b], [c], [d,e]] = [[a,c,d], [a,c,e], [b,c,d], [b,c,e]]" by simp}\\ @{lemma "splice [a,b,c] [x,y,z] = [a,x,b,y,c,z]" by simp}\\ @{lemma "splice [a,b,c,d] [x,y] = [a,x,b,y,c,d]" by simp}\\ @{lemma "shuffles [a,b] [c,d] = {[a,b,c,d],[a,c,b,d],[a,c,d,b],[c,a,b,d],[c,a,d,b],[c,d,a,b]}" by (simp add: insert_commute)}\\ @{lemma "take 2 [a,b,c,d] = [a,b]" by simp}\\ @{lemma "take 6 [a,b,c,d] = [a,b,c,d]" by simp}\\ @{lemma "drop 2 [a,b,c,d] = [c,d]" by simp}\\ @{lemma "drop 6 [a,b,c,d] = []" by simp}\\ @{lemma "takeWhile (%n::nat. n<3) [1,2,3,0] = [1,2]" by simp}\\ @{lemma "dropWhile (%n::nat. n<3) [1,2,3,0] = [3,0]" by simp}\\ @{lemma "distinct [2,0,1::nat]" by simp}\\ @{lemma "remdups [2,0,2,1::nat,2] = [0,1,2]" by simp}\\ @{lemma "remdups_adj [2,2,3,1,1::nat,2,1] = [2,3,1,2,1]" by simp}\\ @{lemma "List.insert 2 [0::nat,1,2] = [0,1,2]" by (simp add: List.insert_def)}\\ @{lemma "List.insert 3 [0::nat,1,2] = [3,0,1,2]" by (simp add: List.insert_def)}\\ @{lemma "List.union [2,3,4] [0::int,1,2] = [4,3,0,1,2]" by (simp add: List.insert_def List.union_def)}\\ @{lemma "List.find (%i::int. i>0) [0,0] = None" by simp}\\ @{lemma "List.find (%i::int. i>0) [0,1,0,2] = Some 1" by simp}\\ @{lemma "count_list [0,1,0,2::int] 0 = 2" by (simp)}\\ @{lemma "List.extract (%i::int. i>0) [0,0] = None" by(simp add: extract_def)}\\ @{lemma "List.extract (%i::int. 
i>0) [0,1,0,2] = Some([0], 1, [0,2])" by(simp add: extract_def)}\\ @{lemma "remove1 2 [2,0,2,1::nat,2] = [0,2,1,2]" by simp}\\ @{lemma "removeAll 2 [2,0,2,1::nat,2] = [0,1]" by simp}\\ @{lemma "nth [a,b,c,d] 2 = c" by simp}\\ @{lemma "[a,b,c,d][2 := x] = [a,b,x,d]" by simp}\\ @{lemma "nths [a,b,c,d,e] {0,2,3} = [a,c,d]" by (simp add:nths_def)}\\ @{lemma "subseqs [a,b] = [[a, b], [a], [b], []]" by simp}\\ @{lemma "List.n_lists 2 [a,b,c] = [[a, a], [b, a], [c, a], [a, b], [b, b], [c, b], [a, c], [b, c], [c, c]]" by (simp add: eval_nat_numeral)}\\ @{lemma "rotate1 [a,b,c,d] = [b,c,d,a]" by simp}\\ @{lemma "rotate 3 [a,b,c,d] = [d,a,b,c]" by (simp add:rotate_def eval_nat_numeral)}\\ @{lemma "replicate 4 a = [a,a,a,a]" by (simp add:eval_nat_numeral)}\\ @{lemma "[2..<5] = [2,3,4]" by (simp add:eval_nat_numeral)}\\ @{lemma "min_list [3,1,-2::int] = -2" by (simp)}\\ @{lemma "arg_min_list (\i. i*i) [3,-1,1,-2::int] = -1" by (simp)} \end{tabular}} \caption{Characteristic examples} \label{fig:Characteristic} \end{figure} Figure~\ref{fig:Characteristic} shows characteristic examples that should give an intuitive understanding of the above functions. \ text\The following simple sort(ed) functions are intended for proofs, not for efficient implementations.\ text \A sorted predicate w.r.t. a relation:\ fun sorted_wrt :: "('a \ 'a \ bool) \ 'a list \ bool" where "sorted_wrt P [] = True" | "sorted_wrt P (x # ys) = ((\y \ set ys. P x y) \ sorted_wrt P ys)" text \A class-based sorted predicate:\ context linorder begin abbreviation sorted :: "'a list \ bool" where "sorted \ sorted_wrt (\)" lemma sorted_simps: "sorted [] = True" "sorted (x # ys) = ((\y \ set ys. x\y) \ sorted ys)" by auto lemma strict_sorted_simps: "sorted_wrt (<) [] = True" "sorted_wrt (<) (x # ys) = ((\y \ set ys. x<y) \ sorted_wrt (<) ys)" by auto primrec insort_key :: "('b \ 'a) \ 'b \ 'b list \ 'b list" where "insort_key f x [] = [x]" | "insort_key f x (y#ys) = (if f x \ f y then (x#y#ys) else y#(insort_key f x ys))" definition sort_key :: "('b \ 'a) \ 'b list \ 'b list" where "sort_key f xs = foldr (insort_key f) xs []" definition insort_insert_key :: "('b \ 'a) \ 'b \ 'b list \ 'b list" where "insort_insert_key f x xs = (if f x \ f ` set xs then xs else insort_key f x xs)" abbreviation "sort \ sort_key (\x. x)" abbreviation "insort \ insort_key (\x. x)" abbreviation "insort_insert \ insort_insert_key (\x. x)" definition stable_sort_key :: "(('b \ 'a) \ 'b list \ 'b list) \ bool" where "stable_sort_key sk = (\f xs k. filter (\y. f y = k) (sk f xs) = filter (\y. f y = k) xs)" lemma strict_sorted_iff: "sorted_wrt (<) l \ sorted l \ distinct l" by (induction l) (auto iff: antisym_conv1) lemma strict_sorted_imp_sorted: "sorted_wrt (<) xs \ sorted xs" by (auto simp: strict_sorted_iff) end subsubsection \List comprehension\ text\Input syntax for Haskell-like list comprehension notation. Typical example: \[(x,y). x \ xs, y \ ys, x \ y]\, the list of all pairs of distinct elements from \xs\ and \ys\. The syntax is as in Haskell, except that \|\ becomes a dot (like in Isabelle's set comprehension): \[e. x \ xs, \]\ rather than \verb![e| x <- xs, ...]!. The qualifiers after the dot are \begin{description} \item[generators] \p \ xs\, where \p\ is a pattern and \xs\ an expression of list type, or \item[guards] \b\, where \b\ is a boolean expression. %\item[local bindings] @ {text"let x = e"}. \end{description} Just like in Haskell, list comprehension is just a shorthand.
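For instance, generators unfold to \concat\/\map\ and guards to \if\ (a rough sketch; cf. the \ML_val\ tests below): \[(x,y). x \ xs, y \ ys]\ stands for \concat (map (\x. map (\y. (x, y)) ys) xs)\, and \[x. x \ xs, P x]\ stands for \concat (map (\x. if P x then [x] else []) xs)\.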
To avoid misunderstandings, the translation into desugared form is not reversed upon output. Note that the translation of \[e. x \ xs]\ is optimized to \<^term>\map (%x. e) xs\. It is easy to write short list comprehensions which stand for complex expressions. During proofs, they may become unreadable (and mangled). In such cases it can be advisable to introduce separate definitions for the list comprehensions in question.\ nonterminal lc_qual and lc_quals syntax "_listcompr" :: "'a \ lc_qual \ lc_quals \ 'a list" ("[_ . __") "_lc_gen" :: "'a \ 'a list \ lc_qual" ("_ \ _") "_lc_test" :: "bool \ lc_qual" ("_") (*"_lc_let" :: "letbinds => lc_qual" ("let _")*) "_lc_end" :: "lc_quals" ("]") "_lc_quals" :: "lc_qual \ lc_quals \ lc_quals" (", __") syntax (ASCII) "_lc_gen" :: "'a \ 'a list \ lc_qual" ("_ <- _") parse_translation \ let val NilC = Syntax.const \<^const_syntax>\Nil\; val ConsC = Syntax.const \<^const_syntax>\Cons\; val mapC = Syntax.const \<^const_syntax>\map\; val concatC = Syntax.const \<^const_syntax>\concat\; val IfC = Syntax.const \<^const_syntax>\If\; val dummyC = Syntax.const \<^const_syntax>\Pure.dummy_pattern\ fun single x = ConsC $ x $ NilC; fun pat_tr ctxt p e opti = (* %x. case x of p => e | _ => [] *) let (* FIXME proper name context!? *) val x = Free (singleton (Name.variant_list (fold Term.add_free_names [p, e] [])) "x", dummyT); val e = if opti then single e else e; val case1 = Syntax.const \<^syntax_const>\_case1\ $ p $ e; val case2 = Syntax.const \<^syntax_const>\_case1\ $ dummyC $ NilC; val cs = Syntax.const \<^syntax_const>\_case2\ $ case1 $ case2; in Syntax_Trans.abs_tr [x, Case_Translation.case_tr false ctxt [x, cs]] end; fun pair_pat_tr (x as Free _) e = Syntax_Trans.abs_tr [x, e] | pair_pat_tr (_ $ p1 $ p2) e = Syntax.const \<^const_syntax>\case_prod\ $ pair_pat_tr p1 (pair_pat_tr p2 e) | pair_pat_tr dummy e = Syntax_Trans.abs_tr [Syntax.const "_idtdummy", e] fun pair_pat ctxt (Const (\<^const_syntax>\Pair\,_) $ s $ t) = pair_pat ctxt s andalso pair_pat ctxt t | pair_pat ctxt (Free (s,_)) = let val thy = Proof_Context.theory_of ctxt; val s' = Proof_Context.intern_const ctxt s; in not (Sign.declared_const thy s') end | pair_pat _ t = (t = dummyC); fun abs_tr ctxt p e opti = let val p = Term_Position.strip_positions p in if pair_pat ctxt p then (pair_pat_tr p e, true) else (pat_tr ctxt p e opti, false) end fun lc_tr ctxt [e, Const (\<^syntax_const>\_lc_test\, _) $ b, qs] = let val res = (case qs of Const (\<^syntax_const>\_lc_end\, _) => single e | Const (\<^syntax_const>\_lc_quals\, _) $ q $ qs => lc_tr ctxt [e, q, qs]); in IfC $ b $ res $ NilC end | lc_tr ctxt [e, Const (\<^syntax_const>\_lc_gen\, _) $ p $ es, Const(\<^syntax_const>\_lc_end\, _)] = (case abs_tr ctxt p e true of (f, true) => mapC $ f $ es | (f, false) => concatC $ (mapC $ f $ es)) | lc_tr ctxt [e, Const (\<^syntax_const>\_lc_gen\, _) $ p $ es, Const (\<^syntax_const>\_lc_quals\, _) $ q $ qs] = let val e' = lc_tr ctxt [e, q, qs]; in concatC $ (mapC $ (fst (abs_tr ctxt p e' false)) $ es) end; in [(\<^syntax_const>\_listcompr\, lc_tr)] end \ ML_val \ let val read = Syntax.read_term \<^context> o Syntax.implode_input; fun check s1 s2 = read s1 aconv read s2 orelse error ("Check failed: " ^ quote (#1 (Input.source_content s1)) ^ Position.here_list [Input.pos_of s1, Input.pos_of s2]); in check \[(x,y,z). b]\ \if b then [(x, y, z)] else []\; check \[(x,y,z). (x,_,y)\xs]\ \map (\(x,_,y). (x, y, z)) xs\; check \[e x y. (x,_)\xs, y\ys]\ \concat (map (\(x,_). map (\y. e x y) ys) xs)\; check \[(x,y,z).
x<a, x>b]\ \if x < a then if b < x then [(x, y, z)] else [] else []\; check \[(x,y,z). x\xs, x>b]\ \concat (map (\x. if b < x then [(x, y, z)] else []) xs)\; check \[(x,y,z). x<a, x\xs]\ \if x < a then map (\x. (x, y, z)) xs else []\; check \[(x,y). Cons True x \ xs]\ \concat (map (\xa. case xa of [] \ [] | True # x \ [(x, y)] | False # x \ []) xs)\; check \[(x,y,z). Cons x [] \ xs]\ \concat (map (\xa. case xa of [] \ [] | [x] \ [(x, y, z)] | x # aa # lista \ []) xs)\; check \[(x,y,z). x<a, x>b, x=d]\ \if x < a then if b < x then if x = d then [(x, y, z)] else [] else [] else []\; check \[(x,y,z). x<a, x>b, y\ys]\ \if x < a then if b < x then map (\y. (x, y, z)) ys else [] else []\; check \[(x,y,z). x<a, (_,x)\xs,y>b]\ \if x < a then concat (map (\(_,x). if b < y then [(x, y, z)] else []) xs) else []\; check \[(x,y,z). x<a, x\xs, y\ys]\ \if x < a then concat (map (\x. map (\y. (x, y, z)) ys) xs) else []\; check \[(x,y,z). x\xs, x>b, y<a]\ \concat (map (\x. if b < x then if y < a then [(x, y, z)] else [] else []) xs)\; check \[(x,y,z). x\xs, x>b, y\ys]\ \concat (map (\x. if b < x then map (\y. (x, y, z)) ys else []) xs)\; check \[(x,y,z). x\xs, (y,_)\ys,y>x]\ \concat (map (\x. concat (map (\(y,_). if x < y then [(x, y, z)] else []) ys)) xs)\; check \[(x,y,z). x\xs, y\ys,z\zs]\ \concat (map (\x. concat (map (\y. map (\z. (x, y, z)) zs) ys)) xs)\ end; \ ML \ (* Simproc for rewriting list comprehensions applied to List.set to set comprehension. *) signature LIST_TO_SET_COMPREHENSION = sig val simproc : Proof.context -> cterm -> thm option end structure List_to_Set_Comprehension : LIST_TO_SET_COMPREHENSION = struct (* conversion *) fun all_exists_conv cv ctxt ct = (case Thm.term_of ct of Const (\<^const_name>\Ex\, _) $ Abs _ => Conv.arg_conv (Conv.abs_conv (all_exists_conv cv o #2) ctxt) ct | _ => cv ctxt ct) fun all_but_last_exists_conv cv ctxt ct = (case Thm.term_of ct of Const (\<^const_name>\Ex\, _) $ Abs (_, _, Const (\<^const_name>\Ex\, _) $ _) => Conv.arg_conv (Conv.abs_conv (all_but_last_exists_conv cv o #2) ctxt) ct | _ => cv ctxt ct) fun Collect_conv cv ctxt ct = (case Thm.term_of ct of Const (\<^const_name>\Collect\, _) $ Abs _ => Conv.arg_conv (Conv.abs_conv cv ctxt) ct | _ => raise CTERM ("Collect_conv", [ct])) fun rewr_conv' th = Conv.rewr_conv (mk_meta_eq th) fun conjunct_assoc_conv ct = Conv.try_conv (rewr_conv' @{thm conj_assoc} then_conv HOLogic.conj_conv Conv.all_conv conjunct_assoc_conv) ct fun right_hand_set_comprehension_conv conv ctxt = HOLogic.Trueprop_conv (HOLogic.eq_conv Conv.all_conv (Collect_conv (all_exists_conv conv o #2) ctxt)) (* term abstraction of list comprehension patterns *) datatype termlets = If | Case of typ * int local val set_Nil_I = @{lemma "set [] = {x. False}" by (simp add: empty_def [symmetric])} val set_singleton = @{lemma "set [a] = {x. x = a}" by simp} val inst_Collect_mem_eq = @{lemma "set A = {x.
x \ set A}" by simp} val del_refl_eq = @{lemma "(t = t \ P) \ P" by simp} fun mk_set T = Const (\<^const_name>\set\, HOLogic.listT T --> HOLogic.mk_setT T) fun dest_set (Const (\<^const_name>\set\, _) $ xs) = xs fun dest_singleton_list (Const (\<^const_name>\Cons\, _) $ t $ (Const (\<^const_name>\Nil\, _))) = t | dest_singleton_list t = raise TERM ("dest_singleton_list", [t]) (*We check that one case returns a singleton list and all other cases return [], and return the index of the one singleton list case.*) fun possible_index_of_singleton_case cases = let fun check (i, case_t) s = (case strip_abs_body case_t of (Const (\<^const_name>\Nil\, _)) => s | _ => (case s of SOME NONE => SOME (SOME i) | _ => NONE)) in fold_index check cases (SOME NONE) |> the_default NONE end (*returns condition continuing term option*) fun dest_if (Const (\<^const_name>\If\, _) $ cond $ then_t $ Const (\<^const_name>\Nil\, _)) = SOME (cond, then_t) | dest_if _ = NONE (*returns (case_expr type index chosen_case constr_name) option*) fun dest_case ctxt case_term = let val (case_const, args) = strip_comb case_term in (case try dest_Const case_const of SOME (c, T) => (case Ctr_Sugar.ctr_sugar_of_case ctxt c of SOME {ctrs, ...} => (case possible_index_of_singleton_case (fst (split_last args)) of SOME i => let val constr_names = map (fst o dest_Const) ctrs val (Ts, _) = strip_type T val T' = List.last Ts in SOME (List.last args, T', i, nth args i, nth constr_names i) end | NONE => NONE) | NONE => NONE) | NONE => NONE) end fun tac ctxt [] = resolve_tac ctxt [set_singleton] 1 ORELSE resolve_tac ctxt [inst_Collect_mem_eq] 1 | tac ctxt (If :: cont) = Splitter.split_tac ctxt @{thms if_split} 1 THEN resolve_tac ctxt @{thms conjI} 1 THEN resolve_tac ctxt @{thms impI} 1 THEN Subgoal.FOCUS (fn {prems, context = ctxt', ...} => CONVERSION (right_hand_set_comprehension_conv (K (HOLogic.conj_conv (Conv.rewr_conv (List.last prems RS @{thm Eq_TrueI})) Conv.all_conv then_conv rewr_conv' @{lemma "(True \ P) = P" by simp})) ctxt') 1) ctxt 1 THEN tac ctxt cont THEN resolve_tac ctxt @{thms impI} 1 THEN Subgoal.FOCUS (fn {prems, context = ctxt', ...} => CONVERSION (right_hand_set_comprehension_conv (K (HOLogic.conj_conv (Conv.rewr_conv (List.last prems RS @{thm Eq_FalseI})) Conv.all_conv then_conv rewr_conv' @{lemma "(False \ P) = False" by simp})) ctxt') 1) ctxt 1 THEN resolve_tac ctxt [set_Nil_I] 1 | tac ctxt (Case (T, i) :: cont) = let val SOME {injects, distincts, case_thms, split, ...} = Ctr_Sugar.ctr_sugar_of ctxt (fst (dest_Type T)) in (* do case distinction *) Splitter.split_tac ctxt [split] 1 THEN EVERY (map_index (fn (i', _) => (if i' < length case_thms - 1 then resolve_tac ctxt @{thms conjI} 1 else all_tac) THEN REPEAT_DETERM (resolve_tac ctxt @{thms allI} 1) THEN resolve_tac ctxt @{thms impI} 1 THEN (if i' = i then (* continue recursively *) Subgoal.FOCUS (fn {prems, context = ctxt', ...} => CONVERSION (Thm.eta_conversion then_conv right_hand_set_comprehension_conv (K ((HOLogic.conj_conv (HOLogic.eq_conv Conv.all_conv (rewr_conv' (List.last prems)) then_conv (Conv.try_conv (Conv.rewrs_conv (map mk_meta_eq injects)))) Conv.all_conv) then_conv (Conv.try_conv (Conv.rewr_conv del_refl_eq)) then_conv conjunct_assoc_conv)) ctxt' then_conv (HOLogic.Trueprop_conv (HOLogic.eq_conv Conv.all_conv (Collect_conv (fn (_, ctxt'') => Conv.repeat_conv (all_but_last_exists_conv (K (rewr_conv' @{lemma "(\x. 
x = t \ P x) = P t" by simp})) ctxt'')) ctxt')))) 1) ctxt 1 THEN tac ctxt cont else Subgoal.FOCUS (fn {prems, context = ctxt', ...} => CONVERSION (right_hand_set_comprehension_conv (K (HOLogic.conj_conv ((HOLogic.eq_conv Conv.all_conv (rewr_conv' (List.last prems))) then_conv (Conv.rewrs_conv (map (fn th => th RS @{thm Eq_FalseI}) distincts))) Conv.all_conv then_conv (rewr_conv' @{lemma "(False \ P) = False" by simp}))) ctxt' then_conv HOLogic.Trueprop_conv (HOLogic.eq_conv Conv.all_conv (Collect_conv (fn (_, ctxt'') => Conv.repeat_conv (Conv.bottom_conv (K (rewr_conv' @{lemma "(\x. P) = P" by simp})) ctxt'')) ctxt'))) 1) ctxt 1 THEN resolve_tac ctxt [set_Nil_I] 1)) case_thms) end in fun simproc ctxt redex = let fun make_inner_eqs bound_vs Tis eqs t = (case dest_case ctxt t of SOME (x, T, i, cont, constr_name) => let val (vs, body) = strip_abs (Envir.eta_long (map snd bound_vs) cont) val x' = incr_boundvars (length vs) x val eqs' = map (incr_boundvars (length vs)) eqs val constr_t = list_comb (Const (constr_name, map snd vs ---> T), map Bound (((length vs) - 1) downto 0)) val constr_eq = Const (\<^const_name>\HOL.eq\, T --> T --> \<^typ>\bool\) $ constr_t $ x' in make_inner_eqs (rev vs @ bound_vs) (Case (T, i) :: Tis) (constr_eq :: eqs') body end | NONE => (case dest_if t of SOME (condition, cont) => make_inner_eqs bound_vs (If :: Tis) (condition :: eqs) cont | NONE => if null eqs then NONE (*no rewriting, nothing to be done*) else let val Type (\<^type_name>\list\, [rT]) = fastype_of1 (map snd bound_vs, t) val pat_eq = (case try dest_singleton_list t of SOME t' => Const (\<^const_name>\HOL.eq\, rT --> rT --> \<^typ>\bool\) $ Bound (length bound_vs) $ t' | NONE => Const (\<^const_name>\Set.member\, rT --> HOLogic.mk_setT rT --> \<^typ>\bool\) $ Bound (length bound_vs) $ (mk_set rT $ t)) val reverse_bounds = curry subst_bounds ((map Bound ((length bound_vs - 1) downto 0)) @ [Bound (length bound_vs)]) val eqs' = map reverse_bounds eqs val pat_eq' = reverse_bounds pat_eq val inner_t = fold (fn (_, T) => fn t => HOLogic.exists_const T $ absdummy T t) (rev bound_vs) (fold (curry HOLogic.mk_conj) eqs' pat_eq') val lhs = Thm.term_of redex val rhs = HOLogic.mk_Collect ("x", rT, inner_t) val rewrite_rule_t = HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs, rhs)) in SOME ((Goal.prove ctxt [] [] rewrite_rule_t (fn {context = ctxt', ...} => tac ctxt' (rev Tis))) RS @{thm eq_reflection}) end)) in make_inner_eqs [] [] [] (dest_set (Thm.term_of redex)) end end end \ simproc_setup list_to_set_comprehension ("set xs") = \K List_to_Set_Comprehension.simproc\ code_datatype set coset hide_const (open) coset subsubsection \\<^const>\Nil\ and \<^const>\Cons\\ lemma not_Cons_self [simp]: "xs \ x # xs" by (induct xs) auto lemma not_Cons_self2 [simp]: "x # xs \ xs" by (rule not_Cons_self [symmetric]) lemma neq_Nil_conv: "(xs \ []) = (\y ys. xs = y # ys)" by (induct xs) auto lemma tl_Nil: "tl xs = [] \ xs = [] \ (\x. xs = [x])" by (cases xs) auto lemmas Nil_tl = tl_Nil[THEN eq_iff_swap] lemma length_induct: "(\xs. \ys. length ys < length xs \ P ys \ P xs) \ P xs" by (fact measure_induct) lemma induct_list012: "\P []; \x. P [x]; \x y zs. \ P zs; P (y # zs) \ \ P (x # y # zs)\ \ P xs" by induction_schema (pat_completeness, lexicographic_order) lemma list_nonempty_induct [consumes 1, case_names single cons]: "\ xs \ []; \x. P [x]; \x xs. xs \ [] \ P xs \ P (x # xs)\ \ P xs" by(induction xs rule: induct_list012) auto lemma inj_split_Cons: "inj_on (\(xs, n). 
n#xs) X" by (auto intro!: inj_onI) lemma inj_on_Cons1 [simp]: "inj_on ((#) x) A" by(simp add: inj_on_def) subsubsection \\<^const>\length\\ text \ Needs to come before \@\ because of theorem \append_eq_append_conv\. \ lemma length_append [simp]: "length (xs @ ys) = length xs + length ys" by (induct xs) auto lemma length_map [simp]: "length (map f xs) = length xs" by (induct xs) auto lemma length_rev [simp]: "length (rev xs) = length xs" by (induct xs) auto lemma length_tl [simp]: "length (tl xs) = length xs - 1" by (cases xs) auto lemma length_0_conv [iff]: "(length xs = 0) = (xs = [])" by (induct xs) auto lemma length_greater_0_conv [iff]: "(0 < length xs) = (xs \ [])" by (induct xs) auto lemma length_pos_if_in_set: "x \ set xs \ length xs > 0" by auto lemma length_Suc_conv: "(length xs = Suc n) = (\y ys. xs = y # ys \ length ys = n)" by (induct xs) auto lemmas Suc_length_conv = length_Suc_conv[THEN eq_iff_swap] lemma Suc_le_length_iff: "(Suc n \ length xs) = (\x ys. xs = x # ys \ n \ length ys)" by (metis Suc_le_D[of n] Suc_le_mono[of n] Suc_length_conv[of _ xs]) lemma impossible_Cons: "length xs \ length ys \ xs = x # ys = False" by (induct xs) auto lemma list_induct2 [consumes 1, case_names Nil Cons]: "length xs = length ys \ P [] [] \ (\x xs y ys. length xs = length ys \ P xs ys \ P (x#xs) (y#ys)) \ P xs ys" proof (induct xs arbitrary: ys) case (Cons x xs ys) then show ?case by (cases ys) simp_all qed simp lemma list_induct3 [consumes 2, case_names Nil Cons]: "length xs = length ys \ length ys = length zs \ P [] [] [] \ (\x xs y ys z zs. length xs = length ys \ length ys = length zs \ P xs ys zs \ P (x#xs) (y#ys) (z#zs)) \ P xs ys zs" proof (induct xs arbitrary: ys zs) case Nil then show ?case by simp next case (Cons x xs ys zs) then show ?case by (cases ys, simp_all) (cases zs, simp_all) qed lemma list_induct4 [consumes 3, case_names Nil Cons]: "length xs = length ys \ length ys = length zs \ length zs = length ws \ P [] [] [] [] \ (\x xs y ys z zs w ws. length xs = length ys \ length ys = length zs \ length zs = length ws \ P xs ys zs ws \ P (x#xs) (y#ys) (z#zs) (w#ws)) \ P xs ys zs ws" proof (induct xs arbitrary: ys zs ws) case Nil then show ?case by simp next case (Cons x xs ys zs ws) then show ?case by ((cases ys, simp_all), (cases zs,simp_all)) (cases ws, simp_all) qed lemma list_induct2': "\ P [] []; \x xs. P (x#xs) []; \y ys. P [] (y#ys); \x xs y ys. P xs ys \ P (x#xs) (y#ys) \ \ P xs ys" by (induct xs arbitrary: ys) (case_tac x, auto)+ lemma list_all2_iff: "list_all2 P xs ys \ length xs = length ys \ (\(x, y) \ set (zip xs ys). 
P x y)" by (induct xs ys rule: list_induct2') auto lemma neq_if_length_neq: "length xs \ length ys \ (xs = ys) == False" by (rule Eq_FalseI) auto subsubsection \\@\ -- append\ global_interpretation append: monoid append Nil proof fix xs ys zs :: "'a list" show "(xs @ ys) @ zs = xs @ (ys @ zs)" by (induct xs) simp_all show "xs @ [] = xs" by (induct xs) simp_all qed simp lemma append_assoc [simp]: "(xs @ ys) @ zs = xs @ (ys @ zs)" by (fact append.assoc) lemma append_Nil2: "xs @ [] = xs" by (fact append.right_neutral) lemma append_is_Nil_conv [iff]: "(xs @ ys = []) = (xs = [] \ ys = [])" by (induct xs) auto lemmas Nil_is_append_conv [iff] = append_is_Nil_conv[THEN eq_iff_swap] lemma append_self_conv [iff]: "(xs @ ys = xs) = (ys = [])" by (induct xs) auto lemmas self_append_conv [iff] = append_self_conv[THEN eq_iff_swap] lemma append_eq_append_conv [simp]: "length xs = length ys \ length us = length vs \ (xs@us = ys@vs) = (xs=ys \ us=vs)" by (induct xs arbitrary: ys; case_tac ys; force) lemma append_eq_append_conv2: "(xs @ ys = zs @ ts) = (\us. xs = zs @ us \ us @ ys = ts \ xs @ us = zs \ ys = us @ ts)" proof (induct xs arbitrary: ys zs ts) case (Cons x xs) then show ?case by (cases zs) auto qed fastforce lemma same_append_eq [iff, induct_simp]: "(xs @ ys = xs @ zs) = (ys = zs)" by simp lemma append1_eq_conv [iff]: "(xs @ [x] = ys @ [y]) = (xs = ys \ x = y)" by simp lemma append_same_eq [iff, induct_simp]: "(ys @ xs = zs @ xs) = (ys = zs)" by simp lemma append_self_conv2 [iff]: "(xs @ ys = ys) = (xs = [])" using append_same_eq [of _ _ "[]"] by auto lemmas self_append_conv2 [iff] = append_self_conv2[THEN eq_iff_swap] lemma hd_Cons_tl: "xs \ [] \ hd xs # tl xs = xs" by (fact list.collapse) lemma hd_append: "hd (xs @ ys) = (if xs = [] then hd ys else hd xs)" by (induct xs) auto lemma hd_append2 [simp]: "xs \ [] \ hd (xs @ ys) = hd xs" by (simp add: hd_append split: list.split) lemma tl_append: "tl (xs @ ys) = (case xs of [] \ tl ys | z#zs \ zs @ ys)" by (simp split: list.split) lemma tl_append2 [simp]: "xs \ [] \ tl (xs @ ys) = tl xs @ ys" by (simp add: tl_append split: list.split) lemma Cons_eq_append_conv: "x#xs = ys@zs = (ys = [] \ x#xs = zs \ (\ys'. x#ys' = ys \ xs = ys'@zs))" by(cases ys) auto lemma append_eq_Cons_conv: "(ys@zs = x#xs) = (ys = [] \ zs = x#xs \ (\ys'. ys = x#ys' \ ys'@zs = xs))" by(cases ys) auto lemma longest_common_prefix: "\ps xs' ys'. xs = ps @ xs' \ ys = ps @ ys' \ (xs' = [] \ ys' = [] \ hd xs' \ hd ys')" by (induct xs ys rule: list_induct2') (blast, blast, blast, metis (no_types, opaque_lifting) append_Cons append_Nil list.sel(1)) text \Trivial rules for solving \@\-equations automatically.\ lemma eq_Nil_appendI: "xs = ys \ xs = [] @ ys" by simp lemma Cons_eq_appendI: "\x # xs1 = ys; xs = xs1 @ zs\ \ x # xs = ys @ zs" by auto lemma append_eq_appendI: "\xs @ xs1 = zs; ys = xs1 @ us\ \ xs @ ys = zs @ us" by auto text \ Simplification procedure for all list equalities. Currently only tries to rearrange \@\ to see if - both lists end in a singleton list, - or both lists end in the same list. 
\ simproc_setup list_eq ("(xs::'a list) = ys") = \ let fun last (cons as Const (\<^const_name>\Cons\, _) $ _ $ xs) = (case xs of Const (\<^const_name>\Nil\, _) => cons | _ => last xs) | last (Const(\<^const_name>\append\,_) $ _ $ ys) = last ys | last t = t; fun list1 (Const(\<^const_name>\Cons\,_) $ _ $ Const(\<^const_name>\Nil\,_)) = true | list1 _ = false; fun butlast ((cons as Const(\<^const_name>\Cons\,_) $ x) $ xs) = (case xs of Const (\<^const_name>\Nil\, _) => xs | _ => cons $ butlast xs) | butlast ((app as Const (\<^const_name>\append\, _) $ xs) $ ys) = app $ butlast ys | butlast xs = Const(\<^const_name>\Nil\, fastype_of xs); val rearr_ss = simpset_of (put_simpset HOL_basic_ss \<^context> addsimps [@{thm append_assoc}, @{thm append_Nil}, @{thm append_Cons}]); fun list_eq ctxt (F as (eq as Const(_,eqT)) $ lhs $ rhs) = let val lastl = last lhs and lastr = last rhs; fun rearr conv = let val lhs1 = butlast lhs and rhs1 = butlast rhs; val Type(_,listT::_) = eqT val appT = [listT,listT] ---> listT val app = Const(\<^const_name>\append\,appT) val F2 = eq $ (app$lhs1$lastl) $ (app$rhs1$lastr) val eq = HOLogic.mk_Trueprop (HOLogic.mk_eq (F,F2)); val thm = Goal.prove ctxt [] [] eq (K (simp_tac (put_simpset rearr_ss ctxt) 1)); in SOME ((conv RS (thm RS trans)) RS eq_reflection) end; in if list1 lastl andalso list1 lastr then rearr @{thm append1_eq_conv} else if lastl aconv lastr then rearr @{thm append_same_eq} else NONE end; in fn _ => fn ctxt => fn ct => list_eq ctxt (Thm.term_of ct) end \ subsubsection \\<^const>\map\\ lemma hd_map: "xs \ [] \ hd (map f xs) = f (hd xs)" by (cases xs) simp_all lemma map_tl: "map f (tl xs) = tl (map f xs)" by (cases xs) simp_all lemma map_ext: "(\x. x \ set xs \ f x = g x) \ map f xs = map g xs" by (induct xs) simp_all lemma map_ident [simp]: "map (\x. x) = (\xs. xs)" by (rule ext, induct_tac xs) auto lemma map_append [simp]: "map f (xs @ ys) = map f xs @ map f ys" by (induct xs) auto lemma map_map [simp]: "map f (map g xs) = map (f \ g) xs" by (induct xs) auto lemma map_comp_map[simp]: "((map f) \ (map g)) = map(f \ g)" by (rule ext) simp lemma rev_map: "rev (map f xs) = map f (rev xs)" by (induct xs) auto lemma map_eq_conv[simp]: "(map f xs = map g xs) = (\x \ set xs. f x = g x)" by (induct xs) auto lemma map_cong [fundef_cong]: "xs = ys \ (\x. x \ set ys \ f x = g x) \ map f xs = map g ys" by simp lemma map_is_Nil_conv [iff]: "(map f xs = []) = (xs = [])" by (rule list.map_disc_iff) lemmas Nil_is_map_conv [iff] = map_is_Nil_conv[THEN eq_iff_swap] lemma map_eq_Cons_conv: "(map f xs = y#ys) = (\z zs. xs = z#zs \ f z = y \ map f zs = ys)" by (cases xs) auto lemma Cons_eq_map_conv: "(x#xs = map f ys) = (\z zs. ys = z#zs \ x = f z \ xs = map f zs)" by (cases ys) auto lemmas map_eq_Cons_D = map_eq_Cons_conv [THEN iffD1] lemmas Cons_eq_map_D = Cons_eq_map_conv [THEN iffD1] declare map_eq_Cons_D [dest!] Cons_eq_map_D [dest!] lemma ex_map_conv: "(\xs. ys = map f xs) = (\y \ set ys. \x. 
y = f x)" by(induct ys, auto simp add: Cons_eq_map_conv) lemma map_eq_imp_length_eq: assumes "map f xs = map g ys" shows "length xs = length ys" using assms proof (induct ys arbitrary: xs) case Nil then show ?case by simp next case (Cons y ys) then obtain z zs where xs: "xs = z # zs" by auto from Cons xs have "map f zs = map g ys" by simp with Cons have "length zs = length ys" by blast with xs show ?case by simp qed lemma map_inj_on: assumes map: "map f xs = map f ys" and inj: "inj_on f (set xs Un set ys)" shows "xs = ys" using map_eq_imp_length_eq [OF map] assms proof (induct rule: list_induct2) case (Cons x xs y ys) then show ?case by (auto intro: sym) qed auto lemma inj_on_map_eq_map: "inj_on f (set xs Un set ys) \ (map f xs = map f ys) = (xs = ys)" by(blast dest:map_inj_on) lemma map_injective: "map f xs = map f ys \ inj f \ xs = ys" by (induct ys arbitrary: xs) (auto dest!:injD) lemma inj_map_eq_map[simp]: "inj f \ (map f xs = map f ys) = (xs = ys)" by(blast dest:map_injective) lemma inj_mapI: "inj f \ inj (map f)" by (iprover dest: map_injective injD intro: inj_onI) lemma inj_mapD: "inj (map f) \ inj f" by (metis (no_types, opaque_lifting) injI list.inject list.simps(9) the_inv_f_f) lemma inj_map[iff]: "inj (map f) = inj f" by (blast dest: inj_mapD intro: inj_mapI) lemma inj_on_mapI: "inj_on f (\(set ` A)) \ inj_on (map f) A" by (blast intro:inj_onI dest:inj_onD map_inj_on) lemma map_idI: "(\x. x \ set xs \ f x = x) \ map f xs = xs" by (induct xs, auto) lemma map_fun_upd [simp]: "y \ set xs \ map (f(y:=v)) xs = map f xs" by (induct xs) auto lemma map_fst_zip[simp]: "length xs = length ys \ map fst (zip xs ys) = xs" by (induct rule:list_induct2, simp_all) lemma map_snd_zip[simp]: "length xs = length ys \ map snd (zip xs ys) = ys" by (induct rule:list_induct2, simp_all) lemma map_fst_zip_take: "map fst (zip xs ys) = take (min (length xs) (length ys)) xs" by (induct xs ys rule: list_induct2') simp_all lemma map_snd_zip_take: "map snd (zip xs ys) = take (min (length xs) (length ys)) ys" by (induct xs ys rule: list_induct2') simp_all lemma map2_map_map: "map2 h (map f xs) (map g xs) = map (\x. h (f x) (g x)) xs" by (induction xs) (auto) functor map: map by (simp_all add: id_def) declare map.id [simp] subsubsection \\<^const>\rev\\ lemma rev_append [simp]: "rev (xs @ ys) = rev ys @ rev xs" by (induct xs) auto lemma rev_rev_ident [simp]: "rev (rev xs) = xs" by (induct xs) auto lemma rev_swap: "(rev xs = ys) = (xs = rev ys)" by auto lemma rev_is_Nil_conv [iff]: "(rev xs = []) = (xs = [])" by (induct xs) auto lemmas Nil_is_rev_conv [iff] = rev_is_Nil_conv[THEN eq_iff_swap] lemma rev_singleton_conv [simp]: "(rev xs = [x]) = (xs = [x])" by (cases xs) auto lemma singleton_rev_conv [simp]: "([x] = rev xs) = ([x] = xs)" by (cases xs) auto lemma rev_is_rev_conv [iff]: "(rev xs = rev ys) = (xs = ys)" proof (induct xs arbitrary: ys) case Nil then show ?case by force next case Cons then show ?case by (cases ys) auto qed lemma inj_on_rev[iff]: "inj_on rev A" by(simp add:inj_on_def) lemma rev_induct [case_names Nil snoc]: assumes "P []" and "\x xs. P xs \ P (xs @ [x])" shows "P xs" proof - have "P (rev (rev xs))" by (rule_tac list = "rev xs" in list.induct, simp_all add: assms) then show ?thesis by simp qed lemma rev_exhaust [case_names Nil snoc]: "(xs = [] \ P) \(\ys y. xs = ys @ [y] \ P) \ P" by (induct xs rule: rev_induct) auto lemmas rev_cases = rev_exhaust lemma rev_nonempty_induct [consumes 1, case_names single snoc]: assumes "xs \ []" and single: "\x. P [x]" and snoc': "\x xs. 
xs \<noteq> [] \<Longrightarrow> P xs \<Longrightarrow> P (xs@[x])"
  shows "P xs"
using \<open>xs \<noteq> []\<close> proof (induct xs rule: rev_induct)
  case (snoc x xs) then show ?case
  proof (cases xs)
    case Nil thus ?thesis by (simp add: single)
  next
    case Cons with snoc show ?thesis by (fastforce intro!: snoc')
  qed
qed simp

lemma rev_eq_Cons_iff[iff]: "(rev xs = y#ys) = (xs = rev ys @ [y])"
by(rule rev_cases[of xs]) auto

subsubsection \<open>\<^const>\<open>set\<close>\<close>

declare list.set[code_post]  \<comment> \<open>pretty output\<close>

lemma finite_set [iff]: "finite (set xs)"
by (induct xs) auto

lemma set_append [simp]: "set (xs @ ys) = (set xs \<union> set ys)"
by (induct xs) auto

lemma hd_in_set[simp]: "xs \<noteq> [] \<Longrightarrow> hd xs \<in> set xs"
by(cases xs) auto

lemma set_subset_Cons: "set xs \<subseteq> set (x # xs)"
by auto

lemma set_ConsD: "y \<in> set (x # xs) \<Longrightarrow> y=x \<or> y \<in> set xs"
by auto

lemma set_empty [iff]: "(set xs = {}) = (xs = [])"
by (induct xs) auto

lemmas set_empty2[iff] = set_empty[THEN eq_iff_swap]

lemma set_rev [simp]: "set (rev xs) = set xs"
by (induct xs) auto

lemma set_map [simp]: "set (map f xs) = f`(set xs)"
by (induct xs) auto

lemma set_filter [simp]: "set (filter P xs) = {x. x \<in> set xs \<and> P x}"
by (induct xs) auto

lemma set_upt [simp]: "set[i..<j] = {i..<j}"
by (induct j) auto

lemma split_list: "x \<in> set xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs"
proof (induct xs)
  case Nil thus ?case by simp
next
  case Cons thus ?case by (auto intro: Cons_eq_appendI)
qed

lemma in_set_conv_decomp: "x \<in> set xs \<longleftrightarrow> (\<exists>ys zs. xs = ys @ x # zs)"
by (auto elim: split_list)

lemma split_list_first: "x \<in> set xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> set ys"
proof (induct xs)
  case Nil thus ?case by simp
next
  case (Cons a xs) show ?case
  proof cases
    assume "x = a" thus ?case using Cons by fastforce
  next
    assume "x \<noteq> a" thus ?case using Cons by(fastforce intro!: Cons_eq_appendI)
  qed
qed

lemma in_set_conv_decomp_first:
  "(x \<in> set xs) = (\<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> set ys)"
by (auto dest!: split_list_first)

lemma split_list_last: "x \<in> set xs \<Longrightarrow> \<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> set zs"
proof (induct xs rule: rev_induct)
  case Nil thus ?case by simp
next
  case (snoc a xs) show ?case
  proof cases
    assume "x = a" thus ?case using snoc by (auto intro!: exI)
  next
    assume "x \<noteq> a" thus ?case using snoc by fastforce
  qed
qed

lemma in_set_conv_decomp_last:
  "(x \<in> set xs) = (\<exists>ys zs. xs = ys @ x # zs \<and> x \<notin> set zs)"
by (auto dest!: split_list_last)

lemma split_list_prop: "\<exists>x \<in> set xs. P x \<Longrightarrow> \<exists>ys x zs. xs = ys @ x # zs \<and> P x"
proof (induct xs)
  case Nil thus ?case by simp
next
  case Cons thus ?case by(simp add:Bex_def)(metis append_Cons append.simps(1))
qed

lemma split_list_propE:
  assumes "\<exists>x \<in> set xs. P x"
  obtains ys x zs where "xs = ys @ x # zs" and "P x"
using split_list_prop [OF assms] by blast

lemma split_list_first_prop:
  "\<exists>x \<in> set xs. P x \<Longrightarrow>
   \<exists>ys x zs. xs = ys@x#zs \<and> P x \<and> (\<forall>y \<in> set ys. \<not> P y)"
proof (induct xs)
  case Nil thus ?case by simp
next
  case (Cons x xs) show ?case
  proof cases
    assume "P x"
    hence "x # xs = [] @ x # xs \<and> P x \<and> (\<forall>y\<in>set []. \<not> P y)" by simp
    thus ?thesis by fast
  next
    assume "\<not> P x"
    hence "\<exists>x\<in>set xs. P x" using Cons(2) by simp
    thus ?thesis using \<open>\<not> P x\<close> Cons(1) by (metis append_Cons set_ConsD)
  qed
qed

lemma split_list_first_propE:
  assumes "\<exists>x \<in> set xs. P x"
  obtains ys x zs where "xs = ys @ x # zs" and "P x" and "\<forall>y \<in> set ys. \<not> P y"
using split_list_first_prop [OF assms] by blast

lemma split_list_first_prop_iff:
  "(\<exists>x \<in> set xs. P x) \<longleftrightarrow>
   (\<exists>ys x zs. xs = ys@x#zs \<and> P x \<and> (\<forall>y \<in> set ys. \<not> P y))"
by (rule, erule split_list_first_prop) auto

lemma split_list_last_prop:
  "\<exists>x \<in> set xs. P x \<Longrightarrow>
   \<exists>ys x zs. xs = ys@x#zs \<and> P x \<and> (\<forall>z \<in> set zs.
\ P z)" proof(induct xs rule:rev_induct) case Nil thus ?case by simp next case (snoc x xs) show ?case proof cases assume "P x" thus ?thesis by (auto intro!: exI) next assume "\ P x" hence "\x\set xs. P x" using snoc(2) by simp thus ?thesis using \\ P x\ snoc(1) by fastforce qed qed lemma split_list_last_propE: assumes "\x \ set xs. P x" obtains ys x zs where "xs = ys @ x # zs" and "P x" and "\z \ set zs. \ P z" using split_list_last_prop [OF assms] by blast lemma split_list_last_prop_iff: "(\x \ set xs. P x) \ (\ys x zs. xs = ys@x#zs \ P x \ (\z \ set zs. \ P z))" by rule (erule split_list_last_prop, auto) lemma finite_list: "finite A \ \xs. set xs = A" by (erule finite_induct) (auto simp add: list.set(2)[symmetric] simp del: list.set(2)) lemma card_length: "card (set xs) \ length xs" by (induct xs) (auto simp add: card_insert_if) lemma set_minus_filter_out: "set xs - {y} = set (filter (\x. \ (x = y)) xs)" by (induct xs) auto lemma append_Cons_eq_iff: "\ x \ set xs; x \ set ys \ \ xs @ x # ys = xs' @ x # ys' \ (xs = xs' \ ys = ys')" by(auto simp: append_eq_Cons_conv Cons_eq_append_conv append_eq_append_conv2) subsubsection \\<^const>\concat\\ lemma concat_append [simp]: "concat (xs @ ys) = concat xs @ concat ys" by (induct xs) auto lemma concat_eq_Nil_conv [simp]: "(concat xss = []) = (\xs \ set xss. xs = [])" by (induct xss) auto lemmas Nil_eq_concat_conv [simp] = concat_eq_Nil_conv[THEN eq_iff_swap] lemma set_concat [simp]: "set (concat xs) = (\x\set xs. set x)" by (induct xs) auto lemma concat_map_singleton[simp]: "concat(map (%x. [f x]) xs) = map f xs" by (induct xs) auto lemma map_concat: "map f (concat xs) = concat (map (map f) xs)" by (induct xs) auto lemma rev_concat: "rev (concat xs) = concat (map rev (rev xs))" by (induct xs) auto lemma length_concat_rev[simp]: "length (concat (rev xs)) = length (concat xs)" by (induction xs) auto lemma concat_eq_concat_iff: "\(x, y) \ set (zip xs ys). length x = length y \ length xs = length ys \ (concat xs = concat ys) = (xs = ys)" proof (induct xs arbitrary: ys) case (Cons x xs ys) thus ?case by (cases ys) auto qed (auto) lemma concat_injective: "concat xs = concat ys \ length xs = length ys \ \(x, y) \ set (zip xs ys). length x = length y \ xs = ys" by (simp add: concat_eq_concat_iff) lemma concat_eq_appendD: assumes "concat xss = ys @ zs" "xss \ []" shows "\xss1 xs xs' xss2. xss = xss1 @ (xs @ xs') # xss2 \ ys = concat xss1 @ xs \ zs = xs' @ concat xss2" using assms proof(induction xss arbitrary: ys) case (Cons xs xss) from Cons.prems consider us where "xs @ us = ys" "concat xss = us @ zs" | us where "xs = ys @ us" "us @ concat xss = zs" by(auto simp add: append_eq_append_conv2) then show ?case proof cases case 1 then show ?thesis using Cons.IH[OF 1(2)] by(cases xss)(auto intro: exI[where x="[]"], metis append.assoc append_Cons concat.simps(2)) qed(auto intro: exI[where x="[]"]) qed simp lemma concat_eq_append_conv: "concat xss = ys @ zs \ (if xss = [] then ys = [] \ zs = [] else \xss1 xs xs' xss2. xss = xss1 @ (xs @ xs') # xss2 \ ys = concat xss1 @ xs \ zs = xs' @ concat xss2)" by(auto dest: concat_eq_appendD) lemma hd_concat: "\xs \ []; hd xs \ []\ \ hd (concat xs) = hd (hd xs)" by (metis concat.simps(2) hd_Cons_tl hd_append2) simproc_setup list_neq ("(xs::'a list) = ys") = \ (* Reduces xs=ys to False if xs and ys cannot be of the same length. This is the case if the atomic sublists of one are a submultiset of those of the other list and there are fewer Cons's in one than the other. 
*) let fun len (Const(\<^const_name>\Nil\,_)) acc = acc | len (Const(\<^const_name>\Cons\,_) $ _ $ xs) (ts,n) = len xs (ts,n+1) | len (Const(\<^const_name>\append\,_) $ xs $ ys) acc = len xs (len ys acc) | len (Const(\<^const_name>\rev\,_) $ xs) acc = len xs acc | len (Const(\<^const_name>\map\,_) $ _ $ xs) acc = len xs acc | len (Const(\<^const_name>\concat\,T) $ (Const(\<^const_name>\rev\,_) $ xss)) acc = len (Const(\<^const_name>\concat\,T) $ xss) acc | len t (ts,n) = (t::ts,n); val ss = simpset_of \<^context>; fun list_neq ctxt ct = let val (Const(_,eqT) $ lhs $ rhs) = Thm.term_of ct; val (ls,m) = len lhs ([],0) and (rs,n) = len rhs ([],0); fun prove_neq() = let val Type(_,listT::_) = eqT; val size = HOLogic.size_const listT; val eq_len = HOLogic.mk_eq (size $ lhs, size $ rhs); val neq_len = HOLogic.mk_Trueprop (HOLogic.Not $ eq_len); val thm = Goal.prove ctxt [] [] neq_len (K (simp_tac (put_simpset ss ctxt) 1)); in SOME (thm RS @{thm neq_if_length_neq}) end in if m < n andalso submultiset (op aconv) (ls,rs) orelse n < m andalso submultiset (op aconv) (rs,ls) then prove_neq() else NONE end; in K list_neq end \ subsubsection \\<^const>\filter\\ lemma filter_append [simp]: "filter P (xs @ ys) = filter P xs @ filter P ys" by (induct xs) auto lemma rev_filter: "rev (filter P xs) = filter P (rev xs)" by (induct xs) simp_all lemma filter_filter [simp]: "filter P (filter Q xs) = filter (\x. Q x \ P x) xs" by (induct xs) auto lemma filter_concat: "filter p (concat xs) = concat (map (filter p) xs)" by (induct xs) auto lemma length_filter_le [simp]: "length (filter P xs) \ length xs" by (induct xs) (auto simp add: le_SucI) lemma sum_length_filter_compl: "length(filter P xs) + length(filter (\x. \P x) xs) = length xs" by(induct xs) simp_all lemma filter_True [simp]: "\x \ set xs. P x \ filter P xs = xs" by (induct xs) auto lemma filter_False [simp]: "\x \ set xs. \ P x \ filter P xs = []" by (induct xs) auto lemma filter_empty_conv: "(filter P xs = []) = (\x\set xs. \ P x)" by (induct xs) simp_all lemmas empty_filter_conv = filter_empty_conv[THEN eq_iff_swap] lemma filter_id_conv: "(filter P xs = xs) = (\x\set xs. P x)" proof (induct xs) case (Cons x xs) then show ?case using length_filter_le by (simp add: impossible_Cons) qed auto lemma filter_map: "filter P (map f xs) = map f (filter (P \ f) xs)" by (induct xs) simp_all lemma length_filter_map[simp]: "length (filter P (map f xs)) = length(filter (P \ f) xs)" by (simp add:filter_map) lemma filter_is_subset [simp]: "set (filter P xs) \ set xs" by auto lemma length_filter_less: "\ x \ set xs; \ P x \ \ length(filter P xs) < length xs" proof (induct xs) case Nil thus ?case by simp next case (Cons x xs) thus ?case using Suc_le_eq by fastforce qed lemma length_filter_conv_card: "length(filter p xs) = card{i. i < length xs \ p(xs!i)}" proof (induct xs) case Nil thus ?case by simp next case (Cons x xs) let ?S = "{i. i < length xs \ p(xs!i)}" have fin: "finite ?S" by(fast intro: bounded_nat_set_is_finite) show ?case (is "?l = card ?S'") proof (cases) assume "p x" hence eq: "?S' = insert 0 (Suc ` ?S)" by(auto simp: image_def split:nat.split dest:gr0_implies_Suc) have "length (filter p (x # xs)) = Suc(card ?S)" using Cons \p x\ by simp also have "\ = Suc(card(Suc ` ?S))" using fin by (simp add: card_image) also have "\ = card ?S'" using eq fin by (simp add:card_insert_if) finally show ?thesis . 
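(* In the remaining case x fails p, so no new index is contributed and the
   index set is merely shifted: ?S' = Suc ` ?S. *)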
next assume "\ p x" hence eq: "?S' = Suc ` ?S" by(auto simp add: image_def split:nat.split elim:lessE) have "length (filter p (x # xs)) = card ?S" using Cons \\ p x\ by simp also have "\ = card(Suc ` ?S)" using fin by (simp add: card_image) also have "\ = card ?S'" using eq fin by (simp add:card_insert_if) finally show ?thesis . qed qed lemma Cons_eq_filterD: "x#xs = filter P ys \ \us vs. ys = us @ x # vs \ (\u\set us. \ P u) \ P x \ xs = filter P vs" (is "_ \ \us vs. ?P ys us vs") proof(induct ys) case Nil thus ?case by simp next case (Cons y ys) show ?case (is "\x. ?Q x") proof cases assume Py: "P y" show ?thesis proof cases assume "x = y" with Py Cons.prems have "?Q []" by simp then show ?thesis .. next assume "x \ y" with Py Cons.prems show ?thesis by simp qed next assume "\ P y" with Cons obtain us vs where "?P (y#ys) (y#us) vs" by fastforce then have "?Q (y#us)" by simp then show ?thesis .. qed qed lemma filter_eq_ConsD: "filter P ys = x#xs \ \us vs. ys = us @ x # vs \ (\u\set us. \ P u) \ P x \ xs = filter P vs" by(rule Cons_eq_filterD) simp lemma filter_eq_Cons_iff: "(filter P ys = x#xs) = (\us vs. ys = us @ x # vs \ (\u\set us. \ P u) \ P x \ xs = filter P vs)" by(auto dest:filter_eq_ConsD) lemmas Cons_eq_filter_iff = filter_eq_Cons_iff[THEN eq_iff_swap] lemma inj_on_filter_key_eq: assumes "inj_on f (insert y (set xs))" shows "filter (\x. f y = f x) xs = filter (HOL.eq y) xs" using assms by (induct xs) auto lemma filter_cong[fundef_cong]: "xs = ys \ (\x. x \ set ys \ P x = Q x) \ filter P xs = filter Q ys" by (induct ys arbitrary: xs) auto subsubsection \List partitioning\ primrec partition :: "('a \ bool) \'a list \ 'a list \ 'a list" where "partition P [] = ([], [])" | "partition P (x # xs) = (let (yes, no) = partition P xs in if P x then (x # yes, no) else (yes, x # no))" lemma partition_filter1: "fst (partition P xs) = filter P xs" by (induct xs) (auto simp add: Let_def split_def) lemma partition_filter2: "snd (partition P xs) = filter (Not \ P) xs" by (induct xs) (auto simp add: Let_def split_def) lemma partition_P: assumes "partition P xs = (yes, no)" shows "(\p \ set yes. P p) \ (\p \ set no. \ P p)" proof - from assms have "yes = fst (partition P xs)" and "no = snd (partition P xs)" by simp_all then show ?thesis by (simp_all add: partition_filter1 partition_filter2) qed lemma partition_set: assumes "partition P xs = (yes, no)" shows "set yes \ set no = set xs" proof - from assms have "yes = fst (partition P xs)" and "no = snd (partition P xs)" by simp_all then show ?thesis by (auto simp add: partition_filter1 partition_filter2) qed lemma partition_filter_conv[simp]: "partition f xs = (filter f xs,filter (Not \ f) xs)" unfolding partition_filter2[symmetric] unfolding partition_filter1[symmetric] by simp declare partition.simps[simp del] subsubsection \\<^const>\nth\\ lemma nth_Cons_0 [simp, code]: "(x # xs)!0 = x" by auto lemma nth_Cons_Suc [simp, code]: "(x # xs)!(Suc n) = xs!n" by auto declare nth.simps [simp del] lemma nth_Cons_pos[simp]: "0 < n \ (x#xs) ! n = xs ! (n - 1)" by(auto simp: Nat.gr0_conv_Suc) lemma nth_append: "(xs @ ys)!n = (if n < length xs then xs!n else ys!(n - length xs))" proof (induct xs arbitrary: n) case (Cons x xs) then show ?case using less_Suc_eq_0_disj by auto qed simp lemma nth_append_length [simp]: "(xs @ x # ys) ! length xs = x" by (induct xs) auto lemma nth_append_length_plus[simp]: "(xs @ ys) ! (length xs + n) = ys ! 
n" by (induct xs) auto lemma nth_map [simp]: "n < length xs \ (map f xs)!n = f(xs!n)" proof (induct xs arbitrary: n) case (Cons x xs) then show ?case using less_Suc_eq_0_disj by auto qed simp lemma nth_tl: "n < length (tl xs) \ tl xs ! n = xs ! Suc n" by (induction xs) auto lemma hd_conv_nth: "xs \ [] \ hd xs = xs!0" by(cases xs) simp_all lemma list_eq_iff_nth_eq: "(xs = ys) = (length xs = length ys \ (\i ?R" by force show "?R \ ?L" using less_Suc_eq_0_disj by auto qed with Cons show ?case by simp qed simp lemma in_set_conv_nth: "(x \ set xs) = (\i < length xs. xs!i = x)" by(auto simp:set_conv_nth) lemma nth_equal_first_eq: assumes "x \ set xs" assumes "n \ length xs" shows "(x # xs) ! n = x \ n = 0" (is "?lhs \ ?rhs") proof assume ?lhs show ?rhs proof (rule ccontr) assume "n \ 0" then have "n > 0" by simp with \?lhs\ have "xs ! (n - 1) = x" by simp moreover from \n > 0\ \n \ length xs\ have "n - 1 < length xs" by simp ultimately have "\ix \ set xs\ in_set_conv_nth [of x xs] show False by simp qed next assume ?rhs then show ?lhs by simp qed lemma nth_non_equal_first_eq: assumes "x \ y" shows "(x # xs) ! n = y \ xs ! (n - 1) = y \ n > 0" (is "?lhs \ ?rhs") proof assume "?lhs" with assms have "n > 0" by (cases n) simp_all with \?lhs\ show ?rhs by simp next assume "?rhs" then show "?lhs" by simp qed lemma list_ball_nth: "\n < length xs; \x \ set xs. P x\ \ P(xs!n)" by (auto simp add: set_conv_nth) lemma nth_mem [simp]: "n < length xs \ xs!n \ set xs" by (auto simp add: set_conv_nth) lemma all_nth_imp_all_set: "\\i < length xs. P(xs!i); x \ set xs\ \ P x" by (auto simp add: set_conv_nth) lemma all_set_conv_all_nth: "(\x \ set xs. P x) = (\i. i < length xs \ P (xs ! i))" by (auto simp add: set_conv_nth) lemma rev_nth: "n < size xs \ rev xs ! n = xs ! (length xs - Suc n)" proof (induct xs arbitrary: n) case Nil thus ?case by simp next case (Cons x xs) hence n: "n < Suc (length xs)" by simp moreover { assume "n < length xs" with n obtain n' where n': "length xs - n = Suc n'" by (cases "length xs - n", auto) moreover from n' have "length xs - Suc n = n'" by simp ultimately have "xs ! (length xs - Suc n) = (x # xs) ! (length xs - n)" by simp } ultimately show ?case by (clarsimp simp add: Cons nth_append) qed lemma Skolem_list_nth: "(\ix. P i x) = (\xs. size xs = k \ (\ixs. ?P k xs)") proof(induct k) case 0 show ?case by simp next case (Suc k) show ?case (is "?L = ?R" is "_ = (\xs. ?P' xs)") proof assume "?R" thus "?L" using Suc by auto next assume "?L" with Suc obtain x xs where "?P k xs \ P k x" by (metis less_Suc_eq) hence "?P'(xs@[x])" by(simp add:nth_append less_Suc_eq) thus "?R" .. 
qed qed subsubsection \\<^const>\list_update\\ lemma length_list_update [simp]: "length(xs[i:=x]) = length xs" by (induct xs arbitrary: i) (auto split: nat.split) lemma nth_list_update: "i < length xs\ (xs[i:=x])!j = (if i = j then x else xs!j)" by (induct xs arbitrary: i j) (auto simp add: nth_Cons split: nat.split) lemma nth_list_update_eq [simp]: "i < length xs \ (xs[i:=x])!i = x" by (simp add: nth_list_update) lemma nth_list_update_neq [simp]: "i \ j \ xs[i:=x]!j = xs!j" by (induct xs arbitrary: i j) (auto simp add: nth_Cons split: nat.split) lemma list_update_id[simp]: "xs[i := xs!i] = xs" by (induct xs arbitrary: i) (simp_all split:nat.splits) lemma list_update_beyond[simp]: "length xs \ i \ xs[i:=x] = xs" proof (induct xs arbitrary: i) case (Cons x xs i) then show ?case by (metis leD length_list_update list_eq_iff_nth_eq nth_list_update_neq) qed simp lemma list_update_nonempty[simp]: "xs[k:=x] = [] \ xs=[]" by (simp only: length_0_conv[symmetric] length_list_update) lemma list_update_same_conv: "i < length xs \ (xs[i := x] = xs) = (xs!i = x)" by (induct xs arbitrary: i) (auto split: nat.split) lemma list_update_append1: "i < size xs \ (xs @ ys)[i:=x] = xs[i:=x] @ ys" by (induct xs arbitrary: i)(auto split:nat.split) lemma list_update_append: "(xs @ ys) [n:= x] = (if n < length xs then xs[n:= x] @ ys else xs @ (ys [n-length xs:= x]))" by (induct xs arbitrary: n) (auto split:nat.splits) lemma list_update_length [simp]: "(xs @ x # ys)[length xs := y] = (xs @ y # ys)" by (induct xs, auto) lemma map_update: "map f (xs[k:= y]) = (map f xs)[k := f y]" by(induct xs arbitrary: k)(auto split:nat.splits) lemma rev_update: "k < length xs \ rev (xs[k:= y]) = (rev xs)[length xs - k - 1 := y]" by (induct xs arbitrary: k) (auto simp: list_update_append split:nat.splits) lemma update_zip: "(zip xs ys)[i:=xy] = zip (xs[i:=fst xy]) (ys[i:=snd xy])" by (induct ys arbitrary: i xy xs) (auto, case_tac xs, auto split: nat.split) lemma set_update_subset_insert: "set(xs[i:=x]) \ insert x (set xs)" by (induct xs arbitrary: i) (auto split: nat.split) lemma set_update_subsetI: "\set xs \ A; x \ A\ \ set(xs[i := x]) \ A" by (blast dest!: set_update_subset_insert [THEN subsetD]) lemma set_update_memI: "n < length xs \ x \ set (xs[n := x])" by (induct xs arbitrary: n) (auto split:nat.splits) lemma list_update_overwrite[simp]: "xs [i := x, i := y] = xs [i := y]" by (induct xs arbitrary: i) (simp_all split: nat.split) lemma list_update_swap: "i \ i' \ xs [i := x, i' := x'] = xs [i' := x', i := x]" by (induct xs arbitrary: i i') (simp_all split: nat.split) lemma list_update_code [code]: "[][i := y] = []" "(x # xs)[0 := y] = y # xs" "(x # xs)[Suc i := y] = x # xs[i := y]" by simp_all subsubsection \\<^const>\last\ and \<^const>\butlast\\ lemma hd_Nil_eq_last: "hd Nil = last Nil" unfolding hd_def last_def by simp lemma last_snoc [simp]: "last (xs @ [x]) = x" by (induct xs) auto lemma butlast_snoc [simp]: "butlast (xs @ [x]) = xs" by (induct xs) auto lemma last_ConsL: "xs = [] \ last(x#xs) = x" by simp lemma last_ConsR: "xs \ [] \ last(x#xs) = last xs" by simp lemma last_append: "last(xs @ ys) = (if ys = [] then last xs else last ys)" by (induct xs) (auto) lemma last_appendL[simp]: "ys = [] \ last(xs @ ys) = last xs" by(simp add:last_append) lemma last_appendR[simp]: "ys \ [] \ last(xs @ ys) = last ys" by(simp add:last_append) lemma last_tl: "xs = [] \ tl xs \ [] \last (tl xs) = last xs" by (induct xs) simp_all lemma butlast_tl: "butlast (tl xs) = tl (butlast xs)" by (induct xs) simp_all lemma hd_rev: "hd(rev xs) = last 
xs" by (metis hd_Cons_tl hd_Nil_eq_last last_snoc rev_eq_Cons_iff rev_is_Nil_conv) lemma last_rev: "last(rev xs) = hd xs" by (metis hd_rev rev_swap) lemma last_in_set[simp]: "as \ [] \ last as \ set as" by (induct as) auto lemma length_butlast [simp]: "length (butlast xs) = length xs - 1" by (induct xs rule: rev_induct) auto lemma butlast_append: "butlast (xs @ ys) = (if ys = [] then butlast xs else xs @ butlast ys)" by (induct xs arbitrary: ys) auto lemma append_butlast_last_id [simp]: "xs \ [] \ butlast xs @ [last xs] = xs" by (induct xs) auto lemma in_set_butlastD: "x \ set (butlast xs) \ x \ set xs" by (induct xs) (auto split: if_split_asm) lemma in_set_butlast_appendI: "x \ set (butlast xs) \ x \ set (butlast ys) \ x \ set (butlast (xs @ ys))" by (auto dest: in_set_butlastD simp add: butlast_append) lemma last_drop[simp]: "n < length xs \ last (drop n xs) = last xs" by (induct xs arbitrary: n)(auto split:nat.split) lemma nth_butlast: assumes "n < length (butlast xs)" shows "butlast xs ! n = xs ! n" proof (cases xs) case (Cons y ys) moreover from assms have "butlast xs ! n = (butlast xs @ [last xs]) ! n" by (simp add: nth_append) ultimately show ?thesis using append_butlast_last_id by simp qed simp lemma last_conv_nth: "xs\[] \ last xs = xs!(length xs - 1)" by(induct xs)(auto simp:neq_Nil_conv) lemma butlast_conv_take: "butlast xs = take (length xs - 1) xs" by (induction xs rule: induct_list012) simp_all lemma last_list_update: "xs \ [] \ last(xs[k:=x]) = (if k = size xs - 1 then x else last xs)" by (auto simp: last_conv_nth) lemma butlast_list_update: "butlast(xs[k:=x]) = (if k = size xs - 1 then butlast xs else (butlast xs)[k:=x])" by(cases xs rule:rev_cases)(auto simp: list_update_append split: nat.splits) lemma last_map: "xs \ [] \ last (map f xs) = f (last xs)" by (cases xs rule: rev_cases) simp_all lemma map_butlast: "map f (butlast xs) = butlast (map f xs)" by (induct xs) simp_all lemma snoc_eq_iff_butlast: "xs @ [x] = ys \ (ys \ [] \ butlast ys = xs \ last ys = x)" by fastforce corollary longest_common_suffix: "\ss xs' ys'. xs = xs' @ ss \ ys = ys' @ ss \ (xs' = [] \ ys' = [] \ last xs' \ last ys')" using longest_common_prefix[of "rev xs" "rev ys"] unfolding rev_swap rev_append by (metis last_rev rev_is_Nil_conv) lemma butlast_rev [simp]: "butlast (rev xs) = rev (tl xs)" by (cases xs) simp_all subsubsection \\<^const>\take\ and \<^const>\drop\\ lemma take_0: "take 0 xs = []" by (induct xs) auto lemma drop_0: "drop 0 xs = xs" by (induct xs) auto lemma take0[simp]: "take 0 = (\xs. [])" by(rule ext) (rule take_0) lemma drop0[simp]: "drop 0 = (\x. 
x)" by(rule ext) (rule drop_0) lemma take_Suc_Cons [simp]: "take (Suc n) (x # xs) = x # take n xs" by simp lemma drop_Suc_Cons [simp]: "drop (Suc n) (x # xs) = drop n xs" by simp declare take_Cons [simp del] and drop_Cons [simp del] lemma take_Suc: "xs \ [] \ take (Suc n) xs = hd xs # take n (tl xs)" by(clarsimp simp add:neq_Nil_conv) lemma drop_Suc: "drop (Suc n) xs = drop n (tl xs)" by(cases xs, simp_all) lemma hd_take[simp]: "j > 0 \ hd (take j xs) = hd xs" by (metis gr0_conv_Suc list.sel(1) take.simps(1) take_Suc) lemma take_tl: "take n (tl xs) = tl (take (Suc n) xs)" by (induct xs arbitrary: n) simp_all lemma drop_tl: "drop n (tl xs) = tl(drop n xs)" by(induct xs arbitrary: n, simp_all add:drop_Cons drop_Suc split:nat.split) lemma tl_take: "tl (take n xs) = take (n - 1) (tl xs)" by (cases n, simp, cases xs, auto) lemma tl_drop: "tl (drop n xs) = drop n (tl xs)" by (simp only: drop_tl) lemma nth_via_drop: "drop n xs = y#ys \ xs!n = y" by (induct xs arbitrary: n, simp)(auto simp: drop_Cons nth_Cons split: nat.splits) lemma take_Suc_conv_app_nth: "i < length xs \ take (Suc i) xs = take i xs @ [xs!i]" proof (induct xs arbitrary: i) case Nil then show ?case by simp next case Cons then show ?case by (cases i) auto qed lemma Cons_nth_drop_Suc: "i < length xs \ (xs!i) # (drop (Suc i) xs) = drop i xs" proof (induct xs arbitrary: i) case Nil then show ?case by simp next case Cons then show ?case by (cases i) auto qed lemma length_take [simp]: "length (take n xs) = min (length xs) n" by (induct n arbitrary: xs) (auto, case_tac xs, auto) lemma length_drop [simp]: "length (drop n xs) = (length xs - n)" by (induct n arbitrary: xs) (auto, case_tac xs, auto) lemma take_all [simp]: "length xs \ n \ take n xs = xs" by (induct n arbitrary: xs) (auto, case_tac xs, auto) lemma drop_all [simp]: "length xs \ n \ drop n xs = []" by (induct n arbitrary: xs) (auto, case_tac xs, auto) lemma take_all_iff [simp]: "take n xs = xs \ length xs \ n" by (metis length_take min.order_iff take_all) (* Looks like a good simp rule but can cause looping; too much interaction between take and length lemmas take_all_iff2[simp] = take_all_iff[THEN eq_iff_swap] *) lemma take_eq_Nil[simp]: "(take n xs = []) = (n = 0 \ xs = [])" by(induct xs arbitrary: n)(auto simp: take_Cons split:nat.split) lemmas take_eq_Nil2[simp] = take_eq_Nil[THEN eq_iff_swap] lemma drop_eq_Nil [simp]: "drop n xs = [] \ length xs \ n" by (metis diff_is_0_eq drop_all length_drop list.size(3)) lemmas drop_eq_Nil2 [simp] = drop_eq_Nil[THEN eq_iff_swap] lemma take_append [simp]: "take n (xs @ ys) = (take n xs @ take (n - length xs) ys)" by (induct n arbitrary: xs) (auto, case_tac xs, auto) lemma drop_append [simp]: "drop n (xs @ ys) = drop n xs @ drop (n - length xs) ys" by (induct n arbitrary: xs) (auto, case_tac xs, auto) lemma take_take [simp]: "take n (take m xs) = take (min n m) xs" proof (induct m arbitrary: xs n) case 0 then show ?case by simp next case Suc then show ?case by (cases xs; cases n) simp_all qed lemma drop_drop [simp]: "drop n (drop m xs) = drop (n + m) xs" proof (induct m arbitrary: xs) case 0 then show ?case by simp next case Suc then show ?case by (cases xs) simp_all qed lemma take_drop: "take n (drop m xs) = drop m (take (n + m) xs)" proof (induct m arbitrary: xs n) case 0 then show ?case by simp next case Suc then show ?case by (cases xs; cases n) simp_all qed lemma drop_take: "drop n (take m xs) = take (m-n) (drop n xs)" by(induct xs arbitrary: m n)(auto simp: take_Cons drop_Cons split: nat.split) lemma append_take_drop_id [simp]: 
"take n xs @ drop n xs = xs" proof (induct n arbitrary: xs) case 0 then show ?case by simp next case Suc then show ?case by (cases xs) simp_all qed lemma take_map: "take n (map f xs) = map f (take n xs)" proof (induct n arbitrary: xs) case 0 then show ?case by simp next case Suc then show ?case by (cases xs) simp_all qed lemma drop_map: "drop n (map f xs) = map f (drop n xs)" proof (induct n arbitrary: xs) case 0 then show ?case by simp next case Suc then show ?case by (cases xs) simp_all qed lemma rev_take: "rev (take i xs) = drop (length xs - i) (rev xs)" proof (induct xs arbitrary: i) case Nil then show ?case by simp next case Cons then show ?case by (cases i) auto qed lemma rev_drop: "rev (drop i xs) = take (length xs - i) (rev xs)" proof (induct xs arbitrary: i) case Nil then show ?case by simp next case Cons then show ?case by (cases i) auto qed lemma drop_rev: "drop n (rev xs) = rev (take (length xs - n) xs)" by (cases "length xs < n") (auto simp: rev_take) lemma take_rev: "take n (rev xs) = rev (drop (length xs - n) xs)" by (cases "length xs < n") (auto simp: rev_drop) lemma nth_take [simp]: "i < n \ (take n xs)!i = xs!i" proof (induct xs arbitrary: i n) case Nil then show ?case by simp next case Cons then show ?case by (cases n; cases i) simp_all qed lemma nth_drop [simp]: "n \ length xs \ (drop n xs)!i = xs!(n + i)" proof (induct n arbitrary: xs) case 0 then show ?case by simp next case Suc then show ?case by (cases xs) simp_all qed lemma butlast_take: "n \ length xs \ butlast (take n xs) = take (n - 1) xs" by (simp add: butlast_conv_take min.absorb1 min.absorb2) lemma butlast_drop: "butlast (drop n xs) = drop n (butlast xs)" by (simp add: butlast_conv_take drop_take ac_simps) lemma take_butlast: "n < length xs \ take n (butlast xs) = take n xs" by (simp add: butlast_conv_take min.absorb1) lemma drop_butlast: "drop n (butlast xs) = butlast (drop n xs)" by (simp add: butlast_conv_take drop_take ac_simps) lemma butlast_power: "(butlast ^^ n) xs = take (length xs - n) xs" by (induct n) (auto simp: butlast_take) lemma hd_drop_conv_nth: "n < length xs \ hd(drop n xs) = xs!n" by(simp add: hd_conv_nth) lemma set_take_subset_set_take: "m \ n \ set(take m xs) \ set(take n xs)" proof (induct xs arbitrary: m n) case (Cons x xs m n) then show ?case by (cases n) (auto simp: take_Cons) qed simp lemma set_take_subset: "set(take n xs) \ set xs" by(induct xs arbitrary: n)(auto simp:take_Cons split:nat.split) lemma set_drop_subset: "set(drop n xs) \ set xs" by(induct xs arbitrary: n)(auto simp:drop_Cons split:nat.split) lemma set_drop_subset_set_drop: "m \ n \ set(drop m xs) \ set(drop n xs)" proof (induct xs arbitrary: m n) case (Cons x xs m n) then show ?case by (clarsimp simp: drop_Cons split: nat.split) (metis set_drop_subset subset_iff) qed simp lemma in_set_takeD: "x \ set(take n xs) \ x \ set xs" using set_take_subset by fast lemma in_set_dropD: "x \ set(drop n xs) \ x \ set xs" using set_drop_subset by fast lemma append_eq_conv_conj: "(xs @ ys = zs) = (xs = take (length xs) zs \ ys = drop (length xs) zs)" proof (induct xs arbitrary: zs) case (Cons x xs zs) then show ?case by (cases zs, auto) qed auto lemma map_eq_append_conv: "map f xs = ys @ zs \ (\us vs. xs = us @ vs \ ys = map f us \ zs = map f vs)" proof - have "map f xs \ ys @ zs \ map f xs \ ys @ zs \ map f xs \ ys @ zs \ map f xs = ys @ zs \ (\bs bsa. 
xs = bs @ bsa \ ys = map f bs \ zs = map f bsa)" by (metis append_eq_conv_conj append_take_drop_id drop_map take_map) then show ?thesis using map_append by blast qed lemmas append_eq_map_conv = map_eq_append_conv[THEN eq_iff_swap] lemma take_add: "take (i+j) xs = take i xs @ take j (drop i xs)" proof (induct xs arbitrary: i) case (Cons x xs i) then show ?case by (cases i, auto) qed auto lemma append_eq_append_conv_if: "(xs\<^sub>1 @ xs\<^sub>2 = ys\<^sub>1 @ ys\<^sub>2) = (if size xs\<^sub>1 \ size ys\<^sub>1 then xs\<^sub>1 = take (size xs\<^sub>1) ys\<^sub>1 \ xs\<^sub>2 = drop (size xs\<^sub>1) ys\<^sub>1 @ ys\<^sub>2 else take (size ys\<^sub>1) xs\<^sub>1 = ys\<^sub>1 \ drop (size ys\<^sub>1) xs\<^sub>1 @ xs\<^sub>2 = ys\<^sub>2)" proof (induct xs\<^sub>1 arbitrary: ys\<^sub>1) case (Cons a xs\<^sub>1 ys\<^sub>1) then show ?case by (cases ys\<^sub>1, auto) qed auto lemma take_hd_drop: "n < length xs \ take n xs @ [hd (drop n xs)] = take (Suc n) xs" by (induct xs arbitrary: n) (simp_all add:drop_Cons split:nat.split) lemma id_take_nth_drop: "i < length xs \ xs = take i xs @ xs!i # drop (Suc i) xs" proof - assume si: "i < length xs" hence "xs = take (Suc i) xs @ drop (Suc i) xs" by auto moreover from si have "take (Suc i) xs = take i xs @ [xs!i]" using take_Suc_conv_app_nth by blast ultimately show ?thesis by auto qed lemma take_update_cancel[simp]: "n \ m \ take n (xs[m := y]) = take n xs" by(simp add: list_eq_iff_nth_eq) lemma drop_update_cancel[simp]: "n < m \ drop m (xs[n := x]) = drop m xs" by(simp add: list_eq_iff_nth_eq) lemma upd_conv_take_nth_drop: "i < length xs \ xs[i:=a] = take i xs @ a # drop (Suc i) xs" proof - assume i: "i < length xs" have "xs[i:=a] = (take i xs @ xs!i # drop (Suc i) xs)[i:=a]" by(rule arg_cong[OF id_take_nth_drop[OF i]]) also have "\ = take i xs @ a # drop (Suc i) xs" using i by (simp add: list_update_append) finally show ?thesis . qed lemma take_update_swap: "take m (xs[n := x]) = (take m xs)[n := x]" proof (cases "n \ length xs") case False then show ?thesis by (simp add: upd_conv_take_nth_drop take_Cons drop_take min_def diff_Suc split: nat.split) qed auto lemma drop_update_swap: assumes "m \ n" shows "drop m (xs[n := x]) = (drop m xs)[n-m := x]" proof (cases "n \ length xs") case False with assms show ?thesis by (simp add: upd_conv_take_nth_drop drop_take) qed auto lemma nth_image: "l \ size xs \ nth xs ` {0..\<^const>\takeWhile\ and \<^const>\dropWhile\\ lemma length_takeWhile_le: "length (takeWhile P xs) \ length xs" by (induct xs) auto lemma takeWhile_dropWhile_id [simp]: "takeWhile P xs @ dropWhile P xs = xs" by (induct xs) auto lemma takeWhile_append1 [simp]: "\x \ set xs; \P(x)\ \ takeWhile P (xs @ ys) = takeWhile P xs" by (induct xs) auto lemma takeWhile_append2 [simp]: "(\x. x \ set xs \ P x) \ takeWhile P (xs @ ys) = xs @ takeWhile P ys" by (induct xs) auto lemma takeWhile_append: "takeWhile P (xs @ ys) = (if \x\set xs. P x then xs @ takeWhile P ys else takeWhile P xs)" using takeWhile_append1[of _ xs P ys] takeWhile_append2[of xs P ys] by auto lemma takeWhile_tail: "\ P x \ takeWhile P (xs @ (x#l)) = takeWhile P xs" by (induct xs) auto lemma takeWhile_eq_Nil_iff: "takeWhile P xs = [] \ xs = [] \ \P (hd xs)" by (cases xs) auto lemma takeWhile_nth: "j < length (takeWhile P xs) \ takeWhile P xs ! j = xs ! j" by (metis nth_append takeWhile_dropWhile_id) lemma dropWhile_nth: "j < length (dropWhile P xs) \ dropWhile P xs ! j = xs ! 
(j + length (takeWhile P xs))" by (metis add.commute nth_append_length_plus takeWhile_dropWhile_id) lemma length_dropWhile_le: "length (dropWhile P xs) \ length xs" by (induct xs) auto lemma dropWhile_append1 [simp]: "\x \ set xs; \P(x)\ \ dropWhile P (xs @ ys) = (dropWhile P xs)@ys" by (induct xs) auto lemma dropWhile_append2 [simp]: "(\x. x \ set xs \ P(x)) \ dropWhile P (xs @ ys) = dropWhile P ys" by (induct xs) auto lemma dropWhile_append3: "\ P y \dropWhile P (xs @ y # ys) = dropWhile P xs @ y # ys" by (induct xs) auto lemma dropWhile_append: "dropWhile P (xs @ ys) = (if \x\set xs. P x then dropWhile P ys else dropWhile P xs @ ys)" using dropWhile_append1[of _ xs P ys] dropWhile_append2[of xs P ys] by auto lemma dropWhile_last: "x \ set xs \ \ P x \ last (dropWhile P xs) = last xs" by (auto simp add: dropWhile_append3 in_set_conv_decomp) lemma set_dropWhileD: "x \ set (dropWhile P xs) \ x \ set xs" by (induct xs) (auto split: if_split_asm) lemma set_takeWhileD: "x \ set (takeWhile P xs) \ x \ set xs \ P x" by (induct xs) (auto split: if_split_asm) lemma takeWhile_eq_all_conv[simp]: "(takeWhile P xs = xs) = (\x \ set xs. P x)" by(induct xs, auto) lemma dropWhile_eq_Nil_conv[simp]: "(dropWhile P xs = []) = (\x \ set xs. P x)" by(induct xs, auto) lemma dropWhile_eq_Cons_conv: "(dropWhile P xs = y#ys) = (xs = takeWhile P xs @ y # ys \ \ P y)" by(induct xs, auto) lemma dropWhile_eq_self_iff: "dropWhile P xs = xs \ xs = [] \ \P (hd xs)" by (cases xs) (auto simp: dropWhile_eq_Cons_conv) lemma distinct_takeWhile[simp]: "distinct xs \ distinct (takeWhile P xs)" by (induct xs) (auto dest: set_takeWhileD) lemma distinct_dropWhile[simp]: "distinct xs \ distinct (dropWhile P xs)" by (induct xs) auto lemma takeWhile_map: "takeWhile P (map f xs) = map f (takeWhile (P \ f) xs)" by (induct xs) auto lemma dropWhile_map: "dropWhile P (map f xs) = map f (dropWhile (P \ f) xs)" by (induct xs) auto lemma takeWhile_eq_take: "takeWhile P xs = take (length (takeWhile P xs)) xs" by (induct xs) auto lemma dropWhile_eq_drop: "dropWhile P xs = drop (length (takeWhile P xs)) xs" by (induct xs) auto lemma hd_dropWhile: "dropWhile P xs \ [] \ \ P (hd (dropWhile P xs))" by (induct xs) auto lemma takeWhile_eq_filter: assumes "\ x. x \ set (dropWhile P xs) \ \ P x" shows "takeWhile P xs = filter P xs" proof - have A: "filter P xs = filter P (takeWhile P xs @ dropWhile P xs)" by simp have B: "filter P (dropWhile P xs) = []" unfolding filter_empty_conv using assms by blast have "filter P xs = takeWhile P xs" unfolding A filter_append B by (auto simp add: filter_id_conv dest: set_takeWhileD) thus ?thesis .. qed lemma takeWhile_eq_take_P_nth: "\ \ i. \ i < n ; i < length xs \ \ P (xs ! i) ; n < length xs \ \ P (xs ! n) \ \ takeWhile P xs = take n xs" proof (induct xs arbitrary: n) case Nil thus ?case by simp next case (Cons x xs) show ?case proof (cases n) case 0 with Cons show ?thesis by simp next case [simp]: (Suc n') have "P x" using Cons.prems(1)[of 0] by simp moreover have "takeWhile P xs = take n' xs" proof (rule Cons.hyps) fix i assume "i < n'" "i < length xs" thus "P (xs ! i)" using Cons.prems(1)[of "Suc i"] by simp next assume "n' < length xs" thus "\ P (xs ! n')" using Cons by auto qed ultimately show ?thesis by simp qed qed lemma nth_length_takeWhile: "length (takeWhile P xs) < length xs \ \ P (xs ! length (takeWhile P xs))" by (induct xs) auto lemma length_takeWhile_less_P_nth: assumes all: "\ i. i < j \ P (xs ! 
i)" and "j \ length xs" shows "j \ length (takeWhile P xs)" proof (rule classical) assume "\ ?thesis" hence "length (takeWhile P xs) < length xs" using assms by simp thus ?thesis using all \\ ?thesis\ nth_length_takeWhile[of P xs] by auto qed lemma takeWhile_neq_rev: "\distinct xs; x \ set xs\ \ takeWhile (\y. y \ x) (rev xs) = rev (tl (dropWhile (\y. y \ x) xs))" by(induct xs) (auto simp: takeWhile_tail[where l="[]"]) lemma dropWhile_neq_rev: "\distinct xs; x \ set xs\ \ dropWhile (\y. y \ x) (rev xs) = x # rev (takeWhile (\y. y \ x) xs)" proof (induct xs) case (Cons a xs) then show ?case by(auto, subst dropWhile_append2, auto) qed simp lemma takeWhile_not_last: "distinct xs \ takeWhile (\y. y \ last xs) xs = butlast xs" by(induction xs rule: induct_list012) auto lemma takeWhile_cong [fundef_cong]: "\l = k; \x. x \ set l \ P x = Q x\ \ takeWhile P l = takeWhile Q k" by (induct k arbitrary: l) (simp_all) lemma dropWhile_cong [fundef_cong]: "\l = k; \x. x \ set l \ P x = Q x\ \ dropWhile P l = dropWhile Q k" by (induct k arbitrary: l, simp_all) lemma takeWhile_idem [simp]: "takeWhile P (takeWhile P xs) = takeWhile P xs" by (induct xs) auto lemma dropWhile_idem [simp]: "dropWhile P (dropWhile P xs) = dropWhile P xs" by (induct xs) auto subsubsection \\<^const>\zip\\ lemma zip_Nil [simp]: "zip [] ys = []" by (induct ys) auto lemma zip_Cons_Cons [simp]: "zip (x # xs) (y # ys) = (x, y) # zip xs ys" by simp declare zip_Cons [simp del] lemma [code]: "zip [] ys = []" "zip xs [] = []" "zip (x # xs) (y # ys) = (x, y) # zip xs ys" by (fact zip_Nil zip.simps(1) zip_Cons_Cons)+ lemma zip_Cons1: "zip (x#xs) ys = (case ys of [] \ [] | y#ys \ (x,y)#zip xs ys)" by(auto split:list.split) lemma length_zip [simp]: "length (zip xs ys) = min (length xs) (length ys)" by (induct xs ys rule:list_induct2') auto lemma zip_obtain_same_length: assumes "\zs ws n. length zs = length ws \ n = min (length xs) (length ys) \ zs = take n xs \ ws = take n ys \ P (zip zs ws)" shows "P (zip xs ys)" proof - let ?n = "min (length xs) (length ys)" have "P (zip (take ?n xs) (take ?n ys))" by (rule assms) simp_all moreover have "zip xs ys = zip (take ?n xs) (take ?n ys)" proof (induct xs arbitrary: ys) case Nil then show ?case by simp next case (Cons x xs) then show ?case by (cases ys) simp_all qed ultimately show ?thesis by simp qed lemma zip_append1: "zip (xs @ ys) zs = zip xs (take (length xs) zs) @ zip ys (drop (length xs) zs)" by (induct xs zs rule:list_induct2') auto lemma zip_append2: "zip xs (ys @ zs) = zip (take (length ys) xs) ys @ zip (drop (length ys) xs) zs" by (induct xs ys rule:list_induct2') auto lemma zip_append [simp]: "\length xs = length us\ \ zip (xs@ys) (us@vs) = zip xs us @ zip ys vs" by (simp add: zip_append1) lemma zip_rev: "length xs = length ys \ zip (rev xs) (rev ys) = rev (zip xs ys)" by (induct rule:list_induct2, simp_all) lemma zip_map_map: "zip (map f xs) (map g ys) = map (\ (x, y). (f x, g y)) (zip xs ys)" proof (induct xs arbitrary: ys) case (Cons x xs) note Cons_x_xs = Cons.hyps show ?case proof (cases ys) case (Cons y ys') show ?thesis unfolding Cons using Cons_x_xs by simp qed simp qed simp lemma zip_map1: "zip (map f xs) ys = map (\(x, y). (f x, y)) (zip xs ys)" using zip_map_map[of f xs "\x. x" ys] by simp lemma zip_map2: "zip xs (map f ys) = map (\(x, y). (x, f y)) (zip xs ys)" using zip_map_map[of "\x. x" xs f ys] by simp lemma map_zip_map: "map f (zip (map g xs) ys) = map (%(x,y). f(g x, y)) (zip xs ys)" by (auto simp: zip_map1) lemma map_zip_map2: "map f (zip xs (map g ys)) = map (%(x,y). 
f(x, g y)) (zip xs ys)" by (auto simp: zip_map2) text\Courtesy of Andreas Lochbihler:\ lemma zip_same_conv_map: "zip xs xs = map (\x. (x, x)) xs" by(induct xs) auto lemma nth_zip [simp]: "\i < length xs; i < length ys\ \ (zip xs ys)!i = (xs!i, ys!i)" proof (induct ys arbitrary: i xs) case (Cons y ys) then show ?case by (cases xs) (simp_all add: nth.simps split: nat.split) qed auto lemma set_zip: "set (zip xs ys) = {(xs!i, ys!i) | i. i < min (length xs) (length ys)}" by(simp add: set_conv_nth cong: rev_conj_cong) lemma zip_same: "((a,b) \ set (zip xs xs)) = (a \ set xs \ a = b)" by(induct xs) auto lemma zip_update: "zip (xs[i:=x]) (ys[i:=y]) = (zip xs ys)[i:=(x,y)]" by (simp add: update_zip) lemma zip_replicate [simp]: "zip (replicate i x) (replicate j y) = replicate (min i j) (x,y)" proof (induct i arbitrary: j) case (Suc i) then show ?case by (cases j, auto) qed auto lemma zip_replicate1: "zip (replicate n x) ys = map (Pair x) (take n ys)" by(induction ys arbitrary: n)(case_tac [2] n, simp_all) lemma take_zip: "take n (zip xs ys) = zip (take n xs) (take n ys)" proof (induct n arbitrary: xs ys) case 0 then show ?case by simp next case Suc then show ?case by (cases xs; cases ys) simp_all qed lemma drop_zip: "drop n (zip xs ys) = zip (drop n xs) (drop n ys)" proof (induct n arbitrary: xs ys) case 0 then show ?case by simp next case Suc then show ?case by (cases xs; cases ys) simp_all qed lemma zip_takeWhile_fst: "zip (takeWhile P xs) ys = takeWhile (P \ fst) (zip xs ys)" proof (induct xs arbitrary: ys) case Nil then show ?case by simp next case Cons then show ?case by (cases ys) auto qed lemma zip_takeWhile_snd: "zip xs (takeWhile P ys) = takeWhile (P \ snd) (zip xs ys)" proof (induct xs arbitrary: ys) case Nil then show ?case by simp next case Cons then show ?case by (cases ys) auto qed lemma set_zip_leftD: "(x,y)\ set (zip xs ys) \ x \ set xs" by (induct xs ys rule:list_induct2') auto lemma set_zip_rightD: "(x,y)\ set (zip xs ys) \ y \ set ys" by (induct xs ys rule:list_induct2') auto lemma in_set_zipE: "(x,y) \ set(zip xs ys) \ (\ x \ set xs; y \ set ys \ \ R) \ R" by(blast dest: set_zip_leftD set_zip_rightD) lemma zip_map_fst_snd: "zip (map fst zs) (map snd zs) = zs" by (induct zs) simp_all lemma zip_eq_conv: "length xs = length ys \ zip xs ys = zs \ map fst zs = xs \ map snd zs = ys" by (auto simp add: zip_map_fst_snd) lemma in_set_zip: "p \ set (zip xs ys) \ (\n. xs ! n = fst p \ ys ! 
n = snd p \ n < length xs \ n < length ys)" by (cases p) (auto simp add: set_zip) lemma in_set_impl_in_set_zip1: assumes "length xs = length ys" assumes "x \ set xs" obtains y where "(x, y) \ set (zip xs ys)" proof - from assms have "x \ set (map fst (zip xs ys))" by simp from this that show ?thesis by fastforce qed lemma in_set_impl_in_set_zip2: assumes "length xs = length ys" assumes "y \ set ys" obtains x where "(x, y) \ set (zip xs ys)" proof - from assms have "y \ set (map snd (zip xs ys))" by simp from this that show ?thesis by fastforce qed lemma zip_eq_Nil_iff[simp]: "zip xs ys = [] \ xs = [] \ ys = []" by (cases xs; cases ys) simp_all lemmas Nil_eq_zip_iff[simp] = zip_eq_Nil_iff[THEN eq_iff_swap] lemma zip_eq_ConsE: assumes "zip xs ys = xy # xys" obtains x xs' y ys' where "xs = x # xs'" and "ys = y # ys'" and "xy = (x, y)" and "xys = zip xs' ys'" proof - from assms have "xs \ []" and "ys \ []" using zip_eq_Nil_iff [of xs ys] by simp_all then obtain x xs' y ys' where xs: "xs = x # xs'" and ys: "ys = y # ys'" by (cases xs; cases ys) auto with assms have "xy = (x, y)" and "xys = zip xs' ys'" by simp_all with xs ys show ?thesis .. qed lemma semilattice_map2: "semilattice (map2 (\<^bold>*))" if "semilattice (\<^bold>*)" for f (infixl "\<^bold>*" 70) proof - from that interpret semilattice f . show ?thesis proof show "map2 (\<^bold>*) (map2 (\<^bold>*) xs ys) zs = map2 (\<^bold>*) xs (map2 (\<^bold>*) ys zs)" for xs ys zs :: "'a list" proof (induction "zip xs (zip ys zs)" arbitrary: xs ys zs) case Nil from Nil [symmetric] show ?case by (auto simp add: zip_eq_Nil_iff) next case (Cons xyz xyzs) from Cons.hyps(2) [symmetric] show ?case by (rule zip_eq_ConsE) (erule zip_eq_ConsE, auto intro: Cons.hyps(1) simp add: ac_simps) qed show "map2 (\<^bold>*) xs ys = map2 (\<^bold>*) ys xs" for xs ys :: "'a list" proof (induction "zip xs ys" arbitrary: xs ys) case Nil then show ?case by (auto simp add: zip_eq_Nil_iff dest: sym) next case (Cons xy xys) from Cons.hyps(2) [symmetric] show ?case by (rule zip_eq_ConsE) (auto intro: Cons.hyps(1) simp add: ac_simps) qed show "map2 (\<^bold>*) xs xs = xs" for xs :: "'a list" by (induction xs) simp_all qed qed lemma pair_list_eqI: assumes "map fst xs = map fst ys" and "map snd xs = map snd ys" shows "xs = ys" proof - from assms(1) have "length xs = length ys" by (rule map_eq_imp_length_eq) from this assms show ?thesis by (induct xs ys rule: list_induct2) (simp_all add: prod_eqI) qed lemma hd_zip: \hd (zip xs ys) = (hd xs, hd ys)\ if \xs \ []\ and \ys \ []\ using that by (cases xs; cases ys) simp_all lemma last_zip: \last (zip xs ys) = (last xs, last ys)\ if \xs \ []\ and \ys \ []\ and \length xs = length ys\ using that by (cases xs rule: rev_cases; cases ys rule: rev_cases) simp_all subsubsection \\<^const>\list_all2\\ lemma list_all2_lengthD [intro?]: "list_all2 P xs ys \ length xs = length ys" by (simp add: list_all2_iff) lemma list_all2_Nil [iff, code]: "list_all2 P [] ys = (ys = [])" by (simp add: list_all2_iff) lemma list_all2_Nil2 [iff, code]: "list_all2 P xs [] = (xs = [])" by (simp add: list_all2_iff) lemma list_all2_Cons [iff, code]: "list_all2 P (x # xs) (y # ys) = (P x y \ list_all2 P xs ys)" by (auto simp add: list_all2_iff) lemma list_all2_Cons1: "list_all2 P (x # xs) ys = (\z zs. ys = z # zs \ P x z \ list_all2 P xs zs)" by (cases ys) auto lemma list_all2_Cons2: "list_all2 P xs (y # ys) = (\z zs. 
xs = z # zs \ P z y \ list_all2 P zs ys)" by (cases xs) auto lemma list_all2_induct [consumes 1, case_names Nil Cons, induct set: list_all2]: assumes P: "list_all2 P xs ys" assumes Nil: "R [] []" assumes Cons: "\x xs y ys. \P x y; list_all2 P xs ys; R xs ys\ \ R (x # xs) (y # ys)" shows "R xs ys" using P by (induct xs arbitrary: ys) (auto simp add: list_all2_Cons1 Nil Cons) lemma list_all2_rev [iff]: "list_all2 P (rev xs) (rev ys) = list_all2 P xs ys" by (simp add: list_all2_iff zip_rev cong: conj_cong) lemma list_all2_rev1: "list_all2 P (rev xs) ys = list_all2 P xs (rev ys)" by (subst list_all2_rev [symmetric]) simp lemma list_all2_append1: "list_all2 P (xs @ ys) zs = (\us vs. zs = us @ vs \ length us = length xs \ length vs = length ys \ list_all2 P xs us \ list_all2 P ys vs)" (is "?lhs = ?rhs") proof assume ?lhs then show ?rhs apply (rule_tac x = "take (length xs) zs" in exI) apply (rule_tac x = "drop (length xs) zs" in exI) apply (force split: nat_diff_split simp add: list_all2_iff zip_append1) done next assume ?rhs then show ?lhs by (auto simp add: list_all2_iff) qed lemma list_all2_append2: "list_all2 P xs (ys @ zs) = (\us vs. xs = us @ vs \ length us = length ys \ length vs = length zs \ list_all2 P us ys \ list_all2 P vs zs)" (is "?lhs = ?rhs") proof assume ?lhs then show ?rhs apply (rule_tac x = "take (length ys) xs" in exI) apply (rule_tac x = "drop (length ys) xs" in exI) apply (force split: nat_diff_split simp add: list_all2_iff zip_append2) done next assume ?rhs then show ?lhs by (auto simp add: list_all2_iff) qed lemma list_all2_append: "length xs = length ys \ list_all2 P (xs@us) (ys@vs) = (list_all2 P xs ys \ list_all2 P us vs)" by (induct rule:list_induct2, simp_all) lemma list_all2_appendI [intro?, trans]: "\ list_all2 P a b; list_all2 P c d \ \ list_all2 P (a@c) (b@d)" by (simp add: list_all2_append list_all2_lengthD) lemma list_all2_conv_all_nth: "list_all2 P xs ys = (length xs = length ys \ (\i < length xs. P (xs!i) (ys!i)))" by (force simp add: list_all2_iff set_zip) lemma list_all2_trans: assumes tr: "!!a b c. P1 a b \ P2 b c \ P3 a c" shows "!!bs cs. list_all2 P1 as bs \ list_all2 P2 bs cs \ list_all2 P3 as cs" (is "!!bs cs. PROP ?Q as bs cs") proof (induct as) fix x xs bs assume I1: "!!bs cs. PROP ?Q xs bs cs" show "!!cs. PROP ?Q (x # xs) bs cs" proof (induct bs) fix y ys cs assume I2: "!!cs. PROP ?Q (x # xs) ys cs" show "PROP ?Q (x # xs) (y # ys) cs" by (induct cs) (auto intro: tr I1 I2) qed simp qed simp lemma list_all2_all_nthI [intro?]: "length a = length b \ (\n. n < length a \ P (a!n) (b!n)) \ list_all2 P a b" by (simp add: list_all2_conv_all_nth) lemma list_all2I: "\x \ set (zip a b). case_prod P x \ length a = length b \ list_all2 P a b" by (simp add: list_all2_iff) lemma list_all2_nthD: "\ list_all2 P xs ys; p < size xs \ \ P (xs!p) (ys!p)" by (simp add: list_all2_conv_all_nth) lemma list_all2_nthD2: "\list_all2 P xs ys; p < size ys\ \ P (xs!p) (ys!p)" by (frule list_all2_lengthD) (auto intro: list_all2_nthD) lemma list_all2_map1: "list_all2 P (map f as) bs = list_all2 (\x y. P (f x) y) as bs" by (simp add: list_all2_conv_all_nth) lemma list_all2_map2: "list_all2 P as (map f bs) = list_all2 (\x y. P x (f y)) as bs" by (auto simp add: list_all2_conv_all_nth) lemma list_all2_refl [intro?]: "(\x. 
P x x) \ list_all2 P xs xs" by (simp add: list_all2_conv_all_nth) lemma list_all2_update_cong: "\ list_all2 P xs ys; P x y \ \ list_all2 P (xs[i:=x]) (ys[i:=y])" by (cases "i < length ys") (auto simp add: list_all2_conv_all_nth nth_list_update) lemma list_all2_takeI [simp,intro?]: "list_all2 P xs ys \ list_all2 P (take n xs) (take n ys)" proof (induct xs arbitrary: n ys) case (Cons x xs) then show ?case by (cases n) (auto simp: list_all2_Cons1) qed auto lemma list_all2_dropI [simp,intro?]: "list_all2 P xs ys \ list_all2 P (drop n xs) (drop n ys)" proof (induct xs arbitrary: n ys) case (Cons x xs) then show ?case by (cases n) (auto simp: list_all2_Cons1) qed auto lemma list_all2_mono [intro?]: "list_all2 P xs ys \ (\xs ys. P xs ys \ Q xs ys) \ list_all2 Q xs ys" by (rule list.rel_mono_strong) lemma list_all2_eq: "xs = ys \ list_all2 (=) xs ys" by (induct xs ys rule: list_induct2') auto lemma list_eq_iff_zip_eq: "xs = ys \ length xs = length ys \ (\(x,y) \ set (zip xs ys). x = y)" by(auto simp add: set_zip list_all2_eq list_all2_conv_all_nth cong: conj_cong) lemma list_all2_same: "list_all2 P xs xs \ (\x\set xs. P x x)" by(auto simp add: list_all2_conv_all_nth set_conv_nth) lemma zip_assoc: "zip xs (zip ys zs) = map (\((x, y), z). (x, y, z)) (zip (zip xs ys) zs)" by(rule list_all2_all_nthI[where P="(=)", unfolded list.rel_eq]) simp_all lemma zip_commute: "zip xs ys = map (\(x, y). (y, x)) (zip ys xs)" by(rule list_all2_all_nthI[where P="(=)", unfolded list.rel_eq]) simp_all lemma zip_left_commute: "zip xs (zip ys zs) = map (\(y, (x, z)). (x, y, z)) (zip ys (zip xs zs))" by(rule list_all2_all_nthI[where P="(=)", unfolded list.rel_eq]) simp_all lemma zip_replicate2: "zip xs (replicate n y) = map (\x. (x, y)) (take n xs)" by(subst zip_commute)(simp add: zip_replicate1) subsubsection \\<^const>\List.product\ and \<^const>\product_lists\\ lemma product_concat_map: "List.product xs ys = concat (map (\x. map (\y. (x,y)) ys) xs)" by(induction xs) (simp)+ lemma set_product[simp]: "set (List.product xs ys) = set xs \ set ys" by (induct xs) auto lemma length_product [simp]: "length (List.product xs ys) = length xs * length ys" by (induct xs) simp_all lemma product_nth: assumes "n < length xs * length ys" shows "List.product xs ys ! n = (xs ! (n div length ys), ys ! (n mod length ys))" using assms proof (induct xs arbitrary: n) case Nil then show ?case by simp next case (Cons x xs n) then have "length ys > 0" by auto with Cons show ?case by (auto simp add: nth_append not_less le_mod_geq le_div_geq) qed lemma in_set_product_lists_length: "xs \ set (product_lists xss) \ length xs = length xss" by (induct xss arbitrary: xs) auto lemma product_lists_set: "set (product_lists xss) = {xs. list_all2 (\x ys. x \ set ys) xs xss}" (is "?L = Collect ?R") proof (intro equalityI subsetI, unfold mem_Collect_eq) fix xs assume "xs \ ?L" then have "length xs = length xss" by (rule in_set_product_lists_length) from this \xs \ ?L\ show "?R xs" by (induct xs xss rule: list_induct2) auto next fix xs assume "?R xs" then show "xs \ ?L" by induct auto qed subsubsection \\<^const>\fold\ with natural argument order\ lemma fold_simps [code]: \ \eta-expanded variant for generated code -- enables tail-recursion optimisation in Scala\ "fold f [] s = s" "fold f (x # xs) s = fold f xs (f x s)" by simp_all lemma fold_remove1_split: "\ \x y. 
x \ set xs \ y \ set xs \ f x \ f y = f y \ f x; x \ set xs \ \ fold f xs = fold f (remove1 x xs) \ f x" by (induct xs) (auto simp add: comp_assoc) lemma fold_cong [fundef_cong]: "a = b \ xs = ys \ (\x. x \ set xs \ f x = g x) \ fold f xs a = fold g ys b" by (induct ys arbitrary: a b xs) simp_all lemma fold_id: "(\x. x \ set xs \ f x = id) \ fold f xs = id" by (induct xs) simp_all lemma fold_commute: "(\x. x \ set xs \ h \ g x = f x \ h) \ h \ fold g xs = fold f xs \ h" by (induct xs) (simp_all add: fun_eq_iff) lemma fold_commute_apply: assumes "\x. x \ set xs \ h \ g x = f x \ h" shows "h (fold g xs s) = fold f xs (h s)" proof - from assms have "h \ fold g xs = fold f xs \ h" by (rule fold_commute) then show ?thesis by (simp add: fun_eq_iff) qed lemma fold_invariant: "\ \x. x \ set xs \ Q x; P s; \x s. Q x \ P s \ P (f x s) \ \ P (fold f xs s)" by (induct xs arbitrary: s) simp_all lemma fold_append [simp]: "fold f (xs @ ys) = fold f ys \ fold f xs" by (induct xs) simp_all lemma fold_map [code_unfold]: "fold g (map f xs) = fold (g \ f) xs" by (induct xs) simp_all lemma fold_filter: "fold f (filter P xs) = fold (\x. if P x then f x else id) xs" by (induct xs) simp_all lemma fold_rev: "(\x y. x \ set xs \ y \ set xs \ f y \ f x = f x \ f y) \ fold f (rev xs) = fold f xs" by (induct xs) (simp_all add: fold_commute_apply fun_eq_iff) lemma fold_Cons_rev: "fold Cons xs = append (rev xs)" by (induct xs) simp_all lemma rev_conv_fold [code]: "rev xs = fold Cons xs []" by (simp add: fold_Cons_rev) lemma fold_append_concat_rev: "fold append xss = append (concat (rev xss))" by (induct xss) simp_all text \\<^const>\Finite_Set.fold\ and \<^const>\fold\\ lemma (in comp_fun_commute_on) fold_set_fold_remdups: assumes "set xs \ S" shows "Finite_Set.fold f y (set xs) = fold f (remdups xs) y" by (rule sym, use assms in \induct xs arbitrary: y\) (simp_all add: insert_absorb fold_fun_left_comm) lemma (in comp_fun_idem_on) fold_set_fold: assumes "set xs \ S" shows "Finite_Set.fold f y (set xs) = fold f xs y" by (rule sym, use assms in \induct xs arbitrary: y\) (simp_all add: fold_fun_left_comm) lemma union_set_fold [code]: "set xs \ A = fold Set.insert xs A" proof - interpret comp_fun_idem Set.insert by (fact comp_fun_idem_insert) show ?thesis by (simp add: union_fold_insert fold_set_fold) qed lemma union_coset_filter [code]: "List.coset xs \ A = List.coset (List.filter (\x. x \ A) xs)" by auto lemma minus_set_fold [code]: "A - set xs = fold Set.remove xs A" proof - interpret comp_fun_idem Set.remove by (fact comp_fun_idem_remove) show ?thesis by (simp add: minus_fold_remove [of _ A] fold_set_fold) qed lemma minus_coset_filter [code]: "A - List.coset xs = set (List.filter (\x. x \ A) xs)" by auto lemma inter_set_filter [code]: "A \ set xs = set (List.filter (\x. 
x \ A) xs)" by auto lemma inter_coset_fold [code]: "A \ List.coset xs = fold Set.remove xs A" by (simp add: Diff_eq [symmetric] minus_set_fold) lemma (in semilattice_set) set_eq_fold [code]: "F (set (x # xs)) = fold f xs x" proof - interpret comp_fun_idem f by standard (simp_all add: fun_eq_iff left_commute) show ?thesis by (simp add: eq_fold fold_set_fold) qed lemma (in complete_lattice) Inf_set_fold: "Inf (set xs) = fold inf xs top" proof - interpret comp_fun_idem "inf :: 'a \ 'a \ 'a" by (fact comp_fun_idem_inf) show ?thesis by (simp add: Inf_fold_inf fold_set_fold inf_commute) qed declare Inf_set_fold [where 'a = "'a set", code] lemma (in complete_lattice) Sup_set_fold: "Sup (set xs) = fold sup xs bot" proof - interpret comp_fun_idem "sup :: 'a \ 'a \ 'a" by (fact comp_fun_idem_sup) show ?thesis by (simp add: Sup_fold_sup fold_set_fold sup_commute) qed declare Sup_set_fold [where 'a = "'a set", code] lemma (in complete_lattice) INF_set_fold: "\(f ` set xs) = fold (inf \ f) xs top" using Inf_set_fold [of "map f xs"] by (simp add: fold_map) lemma (in complete_lattice) SUP_set_fold: "\(f ` set xs) = fold (sup \ f) xs bot" using Sup_set_fold [of "map f xs"] by (simp add: fold_map) subsubsection \Fold variants: \<^const>\foldr\ and \<^const>\foldl\\ text \Correspondence\ lemma foldr_conv_fold [code_abbrev]: "foldr f xs = fold f (rev xs)" by (induct xs) simp_all lemma foldl_conv_fold: "foldl f s xs = fold (\x s. f s x) xs s" by (induct xs arbitrary: s) simp_all lemma foldr_conv_foldl: \ \The ``Third Duality Theorem'' in Bird \& Wadler:\ "foldr f xs a = foldl (\x y. f y x) a (rev xs)" by (simp add: foldr_conv_fold foldl_conv_fold) lemma foldl_conv_foldr: "foldl f a xs = foldr (\x y. f y x) (rev xs) a" by (simp add: foldr_conv_fold foldl_conv_fold) lemma foldr_fold: "(\x y. x \ set xs \ y \ set xs \ f y \ f x = f x \ f y) \ foldr f xs = fold f xs" unfolding foldr_conv_fold by (rule fold_rev) lemma foldr_cong [fundef_cong]: "a = b \ l = k \ (\a x. x \ set l \ f x a = g x a) \ foldr f l a = foldr g k b" by (auto simp add: foldr_conv_fold intro!: fold_cong) lemma foldl_cong [fundef_cong]: "a = b \ l = k \ (\a x. x \ set l \ f a x = g a x) \ foldl f a l = foldl g b k" by (auto simp add: foldl_conv_fold intro!: fold_cong) lemma foldr_append [simp]: "foldr f (xs @ ys) a = foldr f xs (foldr f ys a)" by (simp add: foldr_conv_fold) lemma foldl_append [simp]: "foldl f a (xs @ ys) = foldl f (foldl f a xs) ys" by (simp add: foldl_conv_fold) lemma foldr_map [code_unfold]: "foldr g (map f xs) a = foldr (g \ f) xs a" by (simp add: foldr_conv_fold fold_map rev_map) lemma foldr_filter: "foldr f (filter P xs) = foldr (\x. if P x then f x else id) xs" by (simp add: foldr_conv_fold rev_filter fold_filter) lemma foldl_map [code_unfold]: "foldl g a (map f xs) = foldl (\a x. g a (f x)) a xs" by (simp add: foldl_conv_fold fold_map comp_def) lemma concat_conv_foldr [code]: "concat xss = foldr append xss []" by (simp add: fold_append_concat_rev foldr_conv_fold) subsubsection \\<^const>\upt\\ lemma upt_rec[code]: "[i.. \simp does not terminate!\ by (induct j) auto lemmas upt_rec_numeral[simp] = upt_rec[of "numeral m" "numeral n"] for m n lemma upt_conv_Nil [simp]: "j \ i \ [i.. j \ i)" by(induct j)simp_all lemma upt_eq_Cons_conv: "([i.. i = x \ [i+1.. j \ [i..<(Suc j)] = [i.. \Only needed if \upt_Suc\ is deleted from the simpset.\ by simp lemma upt_conv_Cons: "i < j \ [i.. \no precondition\ "m # n # ns = [m.. n # ns = [Suc m.. [i.. 
\LOOPS as a simprule, since \j \ j\.\ by (induct k) auto lemma length_upt [simp]: "length [i.. [i.. hd[i.. last[i.. n \ take m [i..i. i + n) [0.. (map f [m..n. n - Suc 0) [Suc m..i. f (Suc i)) [0 ..< n]" by (induct n arbitrary: f) auto lemma nth_take_lemma: "k \ length xs \ k \ length ys \ (\i. i < k \ xs!i = ys!i) \ take k xs = take k ys" proof (induct k arbitrary: xs ys) case (Suc k) then show ?case apply (simp add: less_Suc_eq_0_disj) by (simp add: Suc.prems(3) take_Suc_conv_app_nth) qed simp lemma nth_equalityI: "\length xs = length ys; \i. i < length xs \ xs!i = ys!i\ \ xs = ys" by (frule nth_take_lemma [OF le_refl eq_imp_le]) simp_all lemma map_nth: "map (\i. xs ! i) [0.. (\x y. \P x y; Q y x\ \ x = y); list_all2 P xs ys; list_all2 Q ys xs \ \ xs = ys" by (simp add: list_all2_conv_all_nth nth_equalityI) lemma take_equalityI: "(\i. take i xs = take i ys) \ xs = ys" \ \The famous take-lemma.\ by (metis length_take min.commute order_refl take_all) lemma take_Cons': "take n (x # xs) = (if n = 0 then [] else x # take (n - 1) xs)" by (cases n) simp_all lemma drop_Cons': "drop n (x # xs) = (if n = 0 then x # xs else drop (n - 1) xs)" by (cases n) simp_all lemma nth_Cons': "(x # xs)!n = (if n = 0 then x else xs!(n - 1))" by (cases n) simp_all lemma take_Cons_numeral [simp]: "take (numeral v) (x # xs) = x # take (numeral v - 1) xs" by (simp add: take_Cons') lemma drop_Cons_numeral [simp]: "drop (numeral v) (x # xs) = drop (numeral v - 1) xs" by (simp add: drop_Cons') lemma nth_Cons_numeral [simp]: "(x # xs) ! numeral v = xs ! (numeral v - 1)" by (simp add: nth_Cons') lemma map_upt_eqI: \map f [m.. if \length xs = n - m\ \\i. i < length xs \ xs ! i = f (m + i)\ proof (rule nth_equalityI) from \length xs = n - m\ show \length (map f [m.. by simp next fix i assume \i < length (map f [m.. then have \i < n - m\ by simp with that have \xs ! i = f (m + i)\ by simp with \i < n - m\ show \map f [m.. by simp qed subsubsection \\upto\: interval-list on \<^typ>\int\\ function upto :: "int \ int \ int list" ("(1[_../_])") where "upto i j = (if i \ j then i # [i+1..j] else [])" by auto termination by(relation "measure(%(i::int,j). nat(j - i + 1))") auto declare upto.simps[simp del] lemmas upto_rec_numeral [simp] = upto.simps[of "numeral m" "numeral n"] upto.simps[of "numeral m" "- numeral n"] upto.simps[of "- numeral m" "numeral n"] upto.simps[of "- numeral m" "- numeral n"] for m n lemma upto_empty[simp]: "j < i \ [i..j] = []" by(simp add: upto.simps) lemma upto_single[simp]: "[i..i] = [i]" by(simp add: upto.simps) lemma upto_Nil[simp]: "[i..j] = [] \ j < i" by (simp add: upto.simps) lemmas upto_Nil2[simp] = upto_Nil[THEN eq_iff_swap] lemma upto_rec1: "i \ j \ [i..j] = i#[i+1..j]" by(simp add: upto.simps) lemma upto_rec2: "i \ j \ [i..j] = [i..j - 1]@[j]" proof(induct "nat(j-i)" arbitrary: i j) case 0 thus ?case by(simp add: upto.simps) next case (Suc n) hence "n = nat (j - (i + 1))" "i < j" by linarith+ from this(2) Suc.hyps(1)[OF this(1)] Suc(2,3) upto_rec1 show ?case by simp qed lemma length_upto[simp]: "length [i..j] = nat(j - i + 1)" by(induction i j rule: upto.induct) (auto simp: upto.simps) lemma set_upto[simp]: "set[i..j] = {i..j}" proof(induct i j rule:upto.induct) case (1 i j) from this show ?case unfolding upto.simps[of i j] by auto qed lemma nth_upto[simp]: "i + int k \ j \ [i..j] ! 
k = i + int k" proof(induction i j arbitrary: k rule: upto.induct) case (1 i j) then show ?case by (auto simp add: upto_rec1 [of i j] nth_Cons') qed lemma upto_split1: "i \ j \ j \ k \ [i..k] = [i..j-1] @ [j..k]" proof (induction j rule: int_ge_induct) case base thus ?case by (simp add: upto_rec1) next case step thus ?case using upto_rec1 upto_rec2 by simp qed lemma upto_split2: "i \ j \ j \ k \ [i..k] = [i..j] @ [j+1..k]" using upto_rec1 upto_rec2 upto_split1 by auto lemma upto_split3: "\ i \ j; j \ k \ \ [i..k] = [i..j-1] @ j # [j+1..k]" using upto_rec1 upto_split1 by auto text\Tail recursive version for code generation:\ definition upto_aux :: "int \ int \ int list \ int list" where "upto_aux i j js = [i..j] @ js" lemma upto_aux_rec [code]: "upto_aux i j js = (if j\<^const>\successively\\ lemma successively_Cons: "successively P (x # xs) \ xs = [] \ P x (hd xs) \ successively P xs" by (cases xs) auto lemma successively_cong [cong]: assumes "\x y. x \ set xs \ y \ set xs \ P x y \ Q x y" "xs = ys" shows "successively P xs \ successively Q ys" unfolding assms(2) [symmetric] using assms(1) by (induction xs) (auto simp: successively_Cons) lemma successively_append_iff: "successively P (xs @ ys) \ successively P xs \ successively P ys \ (xs = [] \ ys = [] \ P (last xs) (hd ys))" by (induction xs) (auto simp: successively_Cons) lemma successively_if_sorted_wrt: "sorted_wrt P xs \ successively P xs" by (induction xs rule: induct_list012) auto lemma successively_iff_sorted_wrt_strong: assumes "\x y z. x \ set xs \ y \ set xs \ z \ set xs \ P x y \ P y z \ P x z" shows "successively P xs \ sorted_wrt P xs" proof assume "successively P xs" from this and assms show "sorted_wrt P xs" proof (induction xs rule: induct_list012) case (3 x y xs) from "3.prems" have "P x y" by auto have IH: "sorted_wrt P (y # xs)" using "3.prems" by(intro "3.IH"(2) list.set_intros(2))(simp, blast intro: list.set_intros(2)) have "P x z" if asm: "z \ set xs" for z proof - from IH and asm have "P y z" by auto with \P x y\ show "P x z" using "3.prems" asm by auto qed with IH and \P x y\ show ?case by auto qed auto qed (use successively_if_sorted_wrt in blast) lemma successively_conv_sorted_wrt: assumes "transp P" shows "successively P xs \ sorted_wrt P xs" using assms unfolding transp_def by (intro successively_iff_sorted_wrt_strong) blast lemma successively_rev [simp]: "successively P (rev xs) \ successively (\x y. P y x) xs" by (induction xs rule: remdups_adj.induct) (auto simp: successively_append_iff successively_Cons) lemma successively_map: "successively P (map f xs) \ successively (\x y. P (f x) (f y)) xs" by (induction xs rule: induct_list012) auto lemma successively_mono: assumes "successively P xs" assumes "\x y. x \ set xs \ y \ set xs \ P x y \ Q x y" shows "successively Q xs" using assms by (induction Q xs rule: successively.induct) auto lemma successively_altdef: "successively = (\P. rec_list True (\x xs b. case xs of [] \ True | y # _ \ P x y \ b))" proof (intro ext) fix P and xs :: "'a list" show "successively P xs = rec_list True (\x xs b. 
case xs of [] \ True | y # _ \ P x y \ b) xs" by (induction xs) (auto simp: successively_Cons split: list.splits) qed subsubsection \\<^const>\distinct\ and \<^const>\remdups\ and \<^const>\remdups_adj\\ lemma distinct_tl: "distinct xs \ distinct (tl xs)" by (cases xs) simp_all lemma distinct_append [simp]: "distinct (xs @ ys) = (distinct xs \ distinct ys \ set xs \ set ys = {})" by (induct xs) auto lemma distinct_rev[simp]: "distinct(rev xs) = distinct xs" by(induct xs) auto lemma set_remdups [simp]: "set (remdups xs) = set xs" by (induct xs) (auto simp add: insert_absorb) lemma distinct_remdups [iff]: "distinct (remdups xs)" by (induct xs) auto lemma distinct_remdups_id: "distinct xs \ remdups xs = xs" by (induct xs, auto) lemma remdups_id_iff_distinct [simp]: "remdups xs = xs \ distinct xs" by (metis distinct_remdups distinct_remdups_id) lemma finite_distinct_list: "finite A \ \xs. set xs = A \ distinct xs" by (metis distinct_remdups finite_list set_remdups) lemma remdups_eq_nil_iff [simp]: "(remdups x = []) = (x = [])" by (induct x, auto) lemmas remdups_eq_nil_right_iff [simp] = remdups_eq_nil_iff[THEN eq_iff_swap] lemma length_remdups_leq[iff]: "length(remdups xs) \ length xs" by (induct xs) auto lemma length_remdups_eq[iff]: "(length (remdups xs) = length xs) = (remdups xs = xs)" proof (induct xs) case (Cons a xs) then show ?case by simp (metis Suc_n_not_le_n impossible_Cons length_remdups_leq) qed auto lemma remdups_filter: "remdups(filter P xs) = filter P (remdups xs)" by (induct xs) auto lemma distinct_map: "distinct(map f xs) = (distinct xs \ inj_on f (set xs))" by (induct xs) auto lemma distinct_map_filter: "distinct (map f xs) \ distinct (map f (filter P xs))" by (induct xs) auto lemma distinct_filter [simp]: "distinct xs \ distinct (filter P xs)" by (induct xs) auto lemma distinct_upt[simp]: "distinct[i.. distinct (take i xs)" proof (induct xs arbitrary: i) case (Cons a xs) then show ?case by (metis Cons.prems append_take_drop_id distinct_append) qed auto lemma distinct_drop[simp]: "distinct xs \ distinct (drop i xs)" proof (induct xs arbitrary: i) case (Cons a xs) then show ?case by (metis Cons.prems append_take_drop_id distinct_append) qed auto lemma distinct_list_update: assumes d: "distinct xs" and a: "a \ set xs - {xs!i}" shows "distinct (xs[i:=a])" proof (cases "i < length xs") case True with a have anot: "a \ set (take i xs @ xs ! i # drop (Suc i) xs) - {xs!i}" by simp (metis in_set_dropD in_set_takeD) show ?thesis proof (cases "a = xs!i") case True with d show ?thesis by auto next case False have "set (take i xs) \ set (drop (Suc i) xs) = {}" by (metis True d disjoint_insert(1) distinct_append id_take_nth_drop list.set(2)) then show ?thesis using d False anot \i < length xs\ by (simp add: upd_conv_take_nth_drop) qed next case False with d show ?thesis by auto qed lemma distinct_concat: "\ distinct xs; \ ys. ys \ set xs \ distinct ys; \ ys zs. \ ys \ set xs ; zs \ set xs ; ys \ zs \ \ set ys \ set zs = {} \ \ distinct (concat xs)" by (induct xs) auto text \An iff-version of @{thm distinct_concat} is available further down as \distinct_concat_iff\.\ text \It is best to avoid the following indexed version of distinct, but sometimes it is useful.\ lemma distinct_conv_nth: "distinct xs = (\i < size xs. \j < size xs. 
i \ j \ xs!i \ xs!j)" proof (induct xs) case (Cons x xs) show ?case apply (auto simp add: Cons nth_Cons split: nat.split_asm) apply (metis Suc_less_eq2 in_set_conv_nth less_not_refl zero_less_Suc)+ done qed auto lemma nth_eq_iff_index_eq: "\ distinct xs; i < length xs; j < length xs \ \ (xs!i = xs!j) = (i = j)" by(auto simp: distinct_conv_nth) lemma distinct_Ex1: "distinct xs \ x \ set xs \ (\!i. i < length xs \ xs ! i = x)" by (auto simp: in_set_conv_nth nth_eq_iff_index_eq) lemma inj_on_nth: "distinct xs \ \i \ I. i < length xs \ inj_on (nth xs) I" by (rule inj_onI) (simp add: nth_eq_iff_index_eq) lemma bij_betw_nth: assumes "distinct xs" "A = {.. distinct xs; n < length xs \ \ set(xs[n := x]) = insert x (set xs - {xs!n})" by(auto simp: set_eq_iff in_set_conv_nth nth_list_update nth_eq_iff_index_eq) lemma distinct_swap[simp]: "\ i < size xs; j < size xs\ \ distinct(xs[i := xs!j, j := xs!i]) = distinct xs" apply (simp add: distinct_conv_nth nth_list_update) apply (safe; metis) done lemma set_swap[simp]: "\ i < size xs; j < size xs \ \ set(xs[i := xs!j, j := xs!i]) = set xs" by(simp add: set_conv_nth nth_list_update) metis lemma distinct_card: "distinct xs \ card (set xs) = size xs" by (induct xs) auto lemma card_distinct: "card (set xs) = size xs \ distinct xs" proof (induct xs) case (Cons x xs) show ?case proof (cases "x \ set xs") case False with Cons show ?thesis by simp next case True with Cons.prems have "card (set xs) = Suc (length xs)" by (simp add: card_insert_if split: if_split_asm) moreover have "card (set xs) \ length xs" by (rule card_length) ultimately have False by simp thus ?thesis .. qed qed simp lemma distinct_length_filter: "distinct xs \ length (filter P xs) = card ({x. P x} Int set xs)" by (induct xs) (auto) lemma not_distinct_decomp: "\ distinct ws \ \xs ys zs y. ws = xs@[y]@ys@[y]@zs" proof (induct n == "length ws" arbitrary:ws) case (Suc n ws) then show ?case using length_Suc_conv [of ws n] apply (auto simp: eq_commute) apply (metis append_Nil in_set_conv_decomp_first) by (metis append_Cons) qed simp lemma not_distinct_conv_prefix: defines "dec as xs y ys \ y \ set xs \ distinct xs \ as = xs @ y # ys" shows "\distinct as \ (\xs y ys. dec as xs y ys)" (is "?L = ?R") proof assume "?L" then show "?R" proof (induct "length as" arbitrary: as rule: less_induct) case less obtain xs ys zs y where decomp: "as = (xs @ y # ys) @ y # zs" using not_distinct_decomp[OF less.prems] by auto show ?case proof (cases "distinct (xs @ y # ys)") case True with decomp have "dec as (xs @ y # ys) y zs" by (simp add: dec_def) then show ?thesis by blast next case False with less decomp obtain xs' y' ys' where "dec (xs @ y # ys) xs' y' ys'" by atomize_elim auto with decomp have "dec as xs' y' (ys' @ y # zs)" by (simp add: dec_def) then show ?thesis by blast qed qed qed (auto simp: dec_def) lemma distinct_product: "distinct xs \ distinct ys \ distinct (List.product xs ys)" by (induct xs) (auto intro: inj_onI simp add: distinct_map) lemma distinct_product_lists: assumes "\xs \ set xss. distinct xs" shows "distinct (product_lists xss)" using assms proof (induction xss) case (Cons xs xss) note * = this then show ?case proof (cases "product_lists xss") case Nil then show ?thesis by (induct xs) simp_all next case (Cons ps pss) with * show ?thesis by (auto intro!: inj_onI distinct_concat simp add: distinct_map) qed qed simp lemma length_remdups_concat: "length (remdups (concat xss)) = card (\xs\set xss. 
set xs)" by (simp add: distinct_card [symmetric]) lemma remdups_append2: "remdups (xs @ remdups ys) = remdups (xs @ ys)" by(induction xs) auto lemma length_remdups_card_conv: "length(remdups xs) = card(set xs)" proof - have xs: "concat[xs] = xs" by simp from length_remdups_concat[of "[xs]"] show ?thesis unfolding xs by simp qed lemma remdups_remdups: "remdups (remdups xs) = remdups xs" by (induct xs) simp_all lemma distinct_butlast: assumes "distinct xs" shows "distinct (butlast xs)" proof (cases "xs = []") case False from \xs \ []\ obtain ys y where "xs = ys @ [y]" by (cases xs rule: rev_cases) auto with \distinct xs\ show ?thesis by simp qed (auto) lemma remdups_map_remdups: "remdups (map f (remdups xs)) = remdups (map f xs)" by (induct xs) simp_all lemma distinct_zipI1: assumes "distinct xs" shows "distinct (zip xs ys)" proof (rule zip_obtain_same_length) fix xs' :: "'a list" and ys' :: "'b list" and n assume "length xs' = length ys'" assume "xs' = take n xs" with assms have "distinct xs'" by simp with \length xs' = length ys'\ show "distinct (zip xs' ys')" by (induct xs' ys' rule: list_induct2) (auto elim: in_set_zipE) qed lemma distinct_zipI2: assumes "distinct ys" shows "distinct (zip xs ys)" proof (rule zip_obtain_same_length) fix xs' :: "'b list" and ys' :: "'a list" and n assume "length xs' = length ys'" assume "ys' = take n ys" with assms have "distinct ys'" by simp with \length xs' = length ys'\ show "distinct (zip xs' ys')" by (induct xs' ys' rule: list_induct2) (auto elim: in_set_zipE) qed lemma set_take_disj_set_drop_if_distinct: "distinct vs \ i \ j \ set (take i vs) \ set (drop j vs) = {}" by (auto simp: in_set_conv_nth distinct_conv_nth) (* The next two lemmas help Sledgehammer. *) lemma distinct_singleton: "distinct [x]" by simp lemma distinct_length_2_or_more: "distinct (a # b # xs) \ (a \ b \ distinct (a # xs) \ distinct (b # xs))" by force lemma remdups_adj_altdef: "(remdups_adj xs = ys) \ (\f::nat => nat. mono f \ f ` {0 ..< size xs} = {0 ..< size ys} \ (\i < size xs. xs!i = ys!(f i)) \ (\i. i + 1 < size xs \ (xs!i = xs!(i+1) \ f i = f(i+1))))" (is "?L \ (\f. ?p f xs ys)") proof assume ?L then show "\f. ?p f xs ys" proof (induct xs arbitrary: ys rule: remdups_adj.induct) case (1 ys) thus ?case by (intro exI[of _ id]) (auto simp: mono_def) next case (2 x ys) thus ?case by (intro exI[of _ id]) (auto simp: mono_def) next case (3 x1 x2 xs ys) let ?xs = "x1 # x2 # xs" let ?cond = "x1 = x2" define zs where "zs = remdups_adj (x2 # xs)" from 3(1-2)[of zs] obtain f where p: "?p f (x2 # xs) zs" unfolding zs_def by (cases ?cond) auto then have f0: "f 0 = 0" by (intro mono_image_least[where f=f]) blast+ from p have mono: "mono f" and f_xs_zs: "f ` {0.. []" unfolding zs_def by (induct xs) auto let ?Succ = "if ?cond then id else Suc" let ?x1 = "if ?cond then id else Cons x1" let ?f = "\ i. if i = 0 then 0 else ?Succ (f (i - 1))" have ys: "ys = ?x1 zs" unfolding ys by (cases ?cond, auto) have mono: "mono ?f" using \mono f\ unfolding mono_def by auto show ?case unfolding ys proof (intro exI[of _ ?f] conjI allI impI) show "mono ?f" by fact next fix i assume i: "i < length ?xs" with p show "?xs ! i = ?x1 zs ! (?f i)" using zs0 by auto next fix i assume i: "i + 1 < length ?xs" with p show "(?xs ! i = ?xs ! (i + 1)) = (?f i = ?f (i + 1))" by (cases i) (auto simp: f0) next have id: "{0 ..< length (?x1 zs)} = insert 0 (?Succ ` {0 ..< length zs})" using zsne by (cases ?cond, auto) { fix i assume "i < Suc (length xs)" hence "Suc i \ {0.. Collect ((<) 0)" by auto from imageI[OF this, of "\i. 
?Succ (f (i - Suc 0))"] have "?Succ (f i) \ (\i. ?Succ (f (i - Suc 0))) ` ({0.. Collect ((<) 0))" by auto } then show "?f ` {0 ..< length ?xs} = {0 ..< length (?x1 zs)}" unfolding id f_xs_zs[symmetric] by auto qed qed next assume "\ f. ?p f xs ys" then show ?L proof (induct xs arbitrary: ys rule: remdups_adj.induct) case 1 then show ?case by auto next case (2 x) then obtain f where f_img: "f ` {0 ..< size [x]} = {0 ..< size ys}" and f_nth: "\i. i < size [x] \ [x]!i = ys!(f i)" by blast have "length ys = card (f ` {0 ..< size [x]})" using f_img by auto then have *: "length ys = 1" by auto then have "f 0 = 0" using f_img by auto with * show ?case using f_nth by (cases ys) auto next case (3 x1 x2 xs) from "3.prems" obtain f where f_mono: "mono f" and f_img: "f ` {0..i. i < length (x1 # x2 # xs) \ (x1 # x2 # xs) ! i = ys ! f i" "\i. i + 1 < length (x1 # x2 #xs) \ ((x1 # x2 # xs) ! i = (x1 # x2 # xs) ! (i + 1)) = (f i = f (i + 1))" by blast show ?case proof cases assume "x1 = x2" let ?f' = "f \ Suc" have "remdups_adj (x1 # xs) = ys" proof (intro "3.hyps" exI conjI impI allI) show "mono ?f'" using f_mono by (simp add: mono_iff_le_Suc) next have "?f' ` {0 ..< length (x1 # xs)} = f ` {Suc 0 ..< length (x1 # x2 # xs)}" using less_Suc_eq_0_disj by auto also have "\ = f ` {0 ..< length (x1 # x2 # xs)}" proof - have "f 0 = f (Suc 0)" using \x1 = x2\ f_nth[of 0] by simp then show ?thesis using less_Suc_eq_0_disj by auto qed also have "\ = {0 ..< length ys}" by fact finally show "?f' ` {0 ..< length (x1 # xs)} = {0 ..< length ys}" . qed (insert f_nth[of "Suc i" for i], auto simp: \x1 = x2\) then show ?thesis using \x1 = x2\ by simp next assume "x1 \ x2" have two: "Suc (Suc 0) \ length ys" proof - have "2 = card {f 0, f 1}" using \x1 \ x2\ f_nth[of 0] by auto also have "\ \ card (f ` {0..< length (x1 # x2 # xs)})" by (rule card_mono) auto finally show ?thesis using f_img by simp qed have "f 0 = 0" using f_mono f_img by (rule mono_image_least) simp have "f (Suc 0) = Suc 0" proof (rule ccontr) assume "f (Suc 0) \ Suc 0" then have "Suc 0 < f (Suc 0)" using f_nth[of 0] \x1 \ x2\ \f 0 = 0\ by auto then have "\i. Suc 0 < f (Suc i)" using f_mono by (meson Suc_le_mono le0 less_le_trans monoD) then have "Suc 0 \ f i" for i using \f 0 = 0\ by (cases i) fastforce+ then have "Suc 0 \ f ` {0 ..< length (x1 # x2 # xs)}" by auto then show False using f_img two by auto qed obtain ys' where "ys = x1 # x2 # ys'" using two f_nth[of 0] f_nth[of 1] by (auto simp: Suc_le_length_iff \f 0 = 0\ \f (Suc 0) = Suc 0\) have Suc0_le_f_Suc: "Suc 0 \ f (Suc i)" for i by (metis Suc_le_mono \f (Suc 0) = Suc 0\ f_mono le0 mono_def) define f' where "f' x = f (Suc x) - 1" for x have f_Suc: "f (Suc i) = Suc (f' i)" for i using Suc0_le_f_Suc[of i] by (auto simp: f'_def) have "remdups_adj (x2 # xs) = (x2 # ys')" proof (intro "3.hyps" exI conjI impI allI) show "mono f'" using Suc0_le_f_Suc f_mono by (auto simp: f'_def mono_iff_le_Suc le_diff_iff) next have "f' ` {0 ..< length (x2 # xs)} = (\x. f x - 1) ` {0 ..< length (x1 # x2 #xs)}" by (auto simp: f'_def \f 0 = 0\ \f (Suc 0) = Suc 0\ image_def Bex_def less_Suc_eq_0_disj) also have "\ = (\x. x - 1) ` f ` {0 ..< length (x1 # x2 #xs)}" by (auto simp: image_comp) also have "\ = (\x. x - 1) ` {0 ..< length ys}" by (simp only: f_img) also have "\ = {0 ..< length (x2 # ys')}" using \ys = _\ by (fastforce intro: rev_image_eqI) finally show "f' ` {0 ..< length (x2 # xs)} = {0 ..< length (x2 # ys')}" . 
    qed (insert f_nth[of "Suc i" for i] \<open>x1 \<noteq> x2\<close>, auto simp add: f_Suc \<open>ys = _\<close>)
    then show ?case
      using \<open>ys = _\<close> \<open>x1 \<noteq> x2\<close> by simp
  qed
qed
qed

lemma hd_remdups_adj[simp]: "hd (remdups_adj xs) = hd xs"
  by (induction xs rule: remdups_adj.induct) simp_all

lemma remdups_adj_Cons:
  "remdups_adj (x # xs) =
    (case remdups_adj xs of [] \<Rightarrow> [x] | y # xs \<Rightarrow> if x = y then y # xs else x # y # xs)"
  by (induct xs arbitrary: x) (auto split: list.splits)

lemma remdups_adj_append_two:
  "remdups_adj (xs @ [x,y]) = remdups_adj (xs @ [x]) @ (if x = y then [] else [y])"
  by (induct xs rule: remdups_adj.induct, simp_all)

lemma remdups_adj_adjacent:
  "Suc i < length (remdups_adj xs) \<Longrightarrow> remdups_adj xs ! i \<noteq> remdups_adj xs ! Suc i"
proof (induction xs arbitrary: i rule: remdups_adj.induct)
  case (3 x y xs i)
  thus ?case
    by (cases i, cases "x = y") (simp, auto simp: hd_conv_nth[symmetric])
qed simp_all

lemma remdups_adj_rev[simp]: "remdups_adj (rev xs) = rev (remdups_adj xs)"
  by (induct xs rule: remdups_adj.induct, simp_all add: remdups_adj_append_two)

lemma remdups_adj_length[simp]: "length (remdups_adj xs) \<le> length xs"
  by (induct xs rule: remdups_adj.induct, auto)

lemma remdups_adj_length_ge1[simp]: "xs \<noteq> [] \<Longrightarrow> length (remdups_adj xs) \<ge> Suc 0"
  by (induct xs rule: remdups_adj.induct, simp_all)

lemma remdups_adj_Nil_iff[simp]: "remdups_adj xs = [] \<longleftrightarrow> xs = []"
  by (induct xs rule: remdups_adj.induct, simp_all)

lemma remdups_adj_set[simp]: "set (remdups_adj xs) = set xs"
  by (induct xs rule: remdups_adj.induct, simp_all)

lemma last_remdups_adj [simp]: "last (remdups_adj xs) = last xs"
  by (induction xs rule: remdups_adj.induct) auto

lemma remdups_adj_Cons_alt[simp]: "x # tl (remdups_adj (x # xs)) = remdups_adj (x # xs)"
  by (induct xs rule: remdups_adj.induct, auto)

lemma remdups_adj_distinct: "distinct xs \<Longrightarrow> remdups_adj xs = xs"
  by (induct xs rule: remdups_adj.induct, simp_all)

lemma remdups_adj_append:
  "remdups_adj (xs\<^sub>1 @ x # xs\<^sub>2) = remdups_adj (xs\<^sub>1 @ [x]) @ tl (remdups_adj (x # xs\<^sub>2))"
  by (induct xs\<^sub>1 rule: remdups_adj.induct, simp_all)

lemma remdups_adj_singleton:
  "remdups_adj xs = [x] \<Longrightarrow> xs = replicate (length xs) x"
  by (induct xs rule: remdups_adj.induct, auto split: if_split_asm)

lemma remdups_adj_map_injective:
  assumes "inj f"
  shows "remdups_adj (map f xs) = map f (remdups_adj xs)"
  by (induct xs rule: remdups_adj.induct) (auto simp add: injD[OF assms])

lemma remdups_adj_replicate:
  "remdups_adj (replicate n x) = (if n = 0 then [] else [x])"
  by (induction n) (auto simp: remdups_adj_Cons)

lemma remdups_upt [simp]: "remdups [m..<n] = [m..<n]"
proof (cases "m \<le> n")
  case False then show ?thesis by simp
next
  case True then obtain q where "n = m + q"
    by (auto simp add: le_iff_add)
  moreover have "remdups [m..<m + q] = [m..<m + q]"
    by (induct q) simp_all
  ultimately show ?thesis by simp
qed

lemma successively_remdups_adjI:
  "successively P xs \<Longrightarrow> successively P (remdups_adj xs)"
  by (induction xs rule: remdups_adj.induct) (auto simp: successively_Cons)

lemma successively_remdups_adj_iff:
  "(\<And>x. x \<in> set xs \<Longrightarrow> P x x) \<Longrightarrow>
     successively P (remdups_adj xs) \<longleftrightarrow> successively P xs"
  by (induction xs rule: remdups_adj.induct)(auto simp: successively_Cons)
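text \<open>A concrete sanity check (illustrative example only): \<^const>\<open>remdups_adj\<close>
  collapses adjacent duplicates but keeps non-adjacent repetitions.\<close>
lemma "remdups_adj [1::nat, 1, 2, 2, 1] = [1, 2, 1]"
  by simp

lemma remdups_adj_Cons': "remdups_adj (x # xs) = x # remdups_adj (dropWhile (\<lambda>y.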
y = x) xs)" by (induction xs) auto lemma remdups_adj_singleton_iff: "length (remdups_adj xs) = Suc 0 \ xs \ [] \ xs = replicate (length xs) (hd xs)" proof safe assume *: "xs = replicate (length xs) (hd xs)" and [simp]: "xs \ []" show "length (remdups_adj xs) = Suc 0" by (subst *) (auto simp: remdups_adj_replicate) next assume "length (remdups_adj xs) = Suc 0" thus "xs = replicate (length xs) (hd xs)" by (induction xs rule: remdups_adj.induct) (auto split: if_splits) qed auto lemma tl_remdups_adj: "ys \ [] \ tl (remdups_adj ys) = remdups_adj (dropWhile (\x. x = hd ys) (tl ys))" by (cases ys) (simp_all add: remdups_adj_Cons') lemma remdups_adj_append_dropWhile: "remdups_adj (xs @ y # ys) = remdups_adj (xs @ [y]) @ remdups_adj (dropWhile (\x. x = y) ys)" by (subst remdups_adj_append) (simp add: tl_remdups_adj) lemma remdups_adj_append': assumes "xs = [] \ ys = [] \ last xs \ hd ys" shows "remdups_adj (xs @ ys) = remdups_adj xs @ remdups_adj ys" proof - have ?thesis if [simp]: "xs \ []" "ys \ []" and "last xs \ hd ys" proof - obtain x xs' where xs: "xs = xs' @ [x]" by (cases xs rule: rev_cases) auto have "remdups_adj (xs' @ x # ys) = remdups_adj (xs' @ [x]) @ remdups_adj ys" using \last xs \ hd ys\ unfolding xs by (metis (full_types) dropWhile_eq_self_iff last_snoc remdups_adj_append_dropWhile) thus ?thesis by (simp add: xs) qed thus ?thesis using assms by (cases "xs = []"; cases "ys = []") auto qed lemma remdups_adj_append'': "xs \ [] \ remdups_adj (xs @ ys) = remdups_adj xs @ remdups_adj (dropWhile (\y. y = last xs) ys)" by (induction xs rule: remdups_adj.induct) (auto simp: remdups_adj_Cons') subsection \@{const distinct_adj}\ lemma distinct_adj_Nil [simp]: "distinct_adj []" and distinct_adj_singleton [simp]: "distinct_adj [x]" and distinct_adj_Cons_Cons [simp]: "distinct_adj (x # y # xs) \ x \ y \ distinct_adj (y # xs)" by (auto simp: distinct_adj_def) lemma distinct_adj_Cons: "distinct_adj (x # xs) \ xs = [] \ x \ hd xs \ distinct_adj xs" by (cases xs) auto lemma distinct_adj_ConsD: "distinct_adj (x # xs) \ distinct_adj xs" by (cases xs) auto lemma distinct_adj_remdups_adj[simp]: "distinct_adj (remdups_adj xs)" by (induction xs rule: remdups_adj.induct) (auto simp: distinct_adj_Cons) lemma distinct_adj_altdef: "distinct_adj xs \ remdups_adj xs = xs" proof assume "remdups_adj xs = xs" with distinct_adj_remdups_adj[of xs] show "distinct_adj xs" by simp next assume "distinct_adj xs" thus "remdups_adj xs = xs" by (induction xs rule: induct_list012) auto qed lemma distinct_adj_rev [simp]: "distinct_adj (rev xs) \ distinct_adj xs" by (simp add: distinct_adj_def eq_commute) lemma distinct_adj_append_iff: "distinct_adj (xs @ ys) \ distinct_adj xs \ distinct_adj ys \ (xs = [] \ ys = [] \ last xs \ hd ys)" by (auto simp: distinct_adj_def successively_append_iff) lemma distinct_adj_appendD1 [dest]: "distinct_adj (xs @ ys) \ distinct_adj xs" and distinct_adj_appendD2 [dest]: "distinct_adj (xs @ ys) \ distinct_adj ys" by (auto simp: distinct_adj_append_iff) lemma distinct_adj_mapI: "distinct_adj xs \ inj_on f (set xs) \ distinct_adj (map f xs)" unfolding distinct_adj_def successively_map by (erule successively_mono) (auto simp: inj_on_def) lemma distinct_adj_mapD: "distinct_adj (map f xs) \ distinct_adj xs" unfolding distinct_adj_def successively_map by (erule successively_mono) auto lemma distinct_adj_map_iff: "inj_on f (set xs) \ distinct_adj (map f xs) \ distinct_adj xs" using distinct_adj_mapD distinct_adj_mapI by blast subsubsection \\<^const>\insert\\ lemma in_set_insert [simp]: "x \ set xs \ 
List.insert x xs = xs" by (simp add: List.insert_def) lemma not_in_set_insert [simp]: "x \ set xs \ List.insert x xs = x # xs" by (simp add: List.insert_def) lemma insert_Nil [simp]: "List.insert x [] = [x]" by simp lemma set_insert [simp]: "set (List.insert x xs) = insert x (set xs)" by (auto simp add: List.insert_def) lemma distinct_insert [simp]: "distinct (List.insert x xs) = distinct xs" by (simp add: List.insert_def) lemma insert_remdups: "List.insert x (remdups xs) = remdups (List.insert x xs)" by (simp add: List.insert_def) subsubsection \\<^const>\List.union\\ text\This is all one should need to know about union:\ lemma set_union[simp]: "set (List.union xs ys) = set xs \ set ys" unfolding List.union_def by(induct xs arbitrary: ys) simp_all lemma distinct_union[simp]: "distinct(List.union xs ys) = distinct ys" unfolding List.union_def by(induct xs arbitrary: ys) simp_all subsubsection \\<^const>\List.find\\ lemma find_None_iff: "List.find P xs = None \ \ (\x. x \ set xs \ P x)" proof (induction xs) case Nil thus ?case by simp next case (Cons x xs) thus ?case by (fastforce split: if_splits) qed +lemmas find_None_iff2 = find_None_iff[THEN eq_iff_swap] + lemma find_Some_iff: "List.find P xs = Some x \ (\i x = xs!i \ (\j P (xs!j)))" proof (induction xs) case Nil thus ?case by simp next case (Cons x xs) thus ?case apply(auto simp: nth_Cons' split: if_splits) using diff_Suc_1[unfolded One_nat_def] less_Suc_eq_0_disj by fastforce qed +lemmas find_Some_iff2 = find_Some_iff[THEN eq_iff_swap] + lemma find_cong[fundef_cong]: assumes "xs = ys" and "\x. x \ set ys \ P x = Q x" shows "List.find P xs = List.find Q ys" proof (cases "List.find P xs") case None thus ?thesis by (metis find_None_iff assms) next case (Some x) hence "List.find Q ys = Some x" using assms by (auto simp add: find_Some_iff) thus ?thesis using Some by auto qed lemma find_dropWhile: "List.find P xs = (case dropWhile (Not \ P) xs of [] \ None | x # _ \ Some x)" by (induct xs) simp_all subsubsection \\<^const>\count_list\\ lemma count_notin[simp]: "x \ set xs \ count_list xs x = 0" by (induction xs) auto lemma count_le_length: "count_list xs x \ length xs" by (induction xs) auto lemma sum_count_set: "set xs \ X \ finite X \ sum (count_list xs) X = length xs" proof (induction xs arbitrary: X) case (Cons x xs) then show ?case using sum.remove [of X x "count_list xs"] by (auto simp: sum.If_cases simp flip: diff_eq) qed simp subsubsection \\<^const>\List.extract\\ lemma extract_None_iff: "List.extract P xs = None \ \ (\ x\set xs. P x)" by(auto simp: extract_def dropWhile_eq_Cons_conv split: list.splits) (metis in_set_conv_decomp) lemma extract_SomeE: "List.extract P xs = Some (ys, y, zs) \ xs = ys @ y # zs \ P y \ \ (\ y \ set ys. P y)" by(auto simp: extract_def dropWhile_eq_Cons_conv split: list.splits) lemma extract_Some_iff: "List.extract P xs = Some (ys, y, zs) \ xs = ys @ y # zs \ P y \ \ (\ y \ set ys. 
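text \<open>A concrete sanity check (illustrative example only), computed by
  unfolding the definition of \<^const>\<open>List.extract\<close>:\<close>
lemma "List.extract (\<lambda>x. 2 < x) [1::nat, 2, 3, 4] = Some ([1, 2], 3, [4])"
  by (simp add: extract_def)

lemma extract_Some_iff:
  "List.extract P xs = Some (ys, y, zs) \<longleftrightarrow>
   xs = ys @ y # zs \<and> P y \<and> \<not> (\<exists> y \<in> set ys.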
P y)" by(auto simp: extract_def dropWhile_eq_Cons_conv dest: set_takeWhileD split: list.splits) lemma extract_Nil_code[code]: "List.extract P [] = None" by(simp add: extract_def) lemma extract_Cons_code[code]: "List.extract P (x # xs) = (if P x then Some ([], x, xs) else (case List.extract P xs of None \ None | Some (ys, y, zs) \ Some (x#ys, y, zs)))" by(auto simp add: extract_def comp_def split: list.splits) (metis dropWhile_eq_Nil_conv list.distinct(1)) subsubsection \\<^const>\remove1\\ lemma remove1_append: "remove1 x (xs @ ys) = (if x \ set xs then remove1 x xs @ ys else xs @ remove1 x ys)" by (induct xs) auto lemma remove1_commute: "remove1 x (remove1 y zs) = remove1 y (remove1 x zs)" by (induct zs) auto lemma in_set_remove1[simp]: "a \ b \ a \ set(remove1 b xs) = (a \ set xs)" by (induct xs) auto lemma set_remove1_subset: "set(remove1 x xs) \ set xs" by (induct xs) auto lemma set_remove1_eq [simp]: "distinct xs \ set(remove1 x xs) = set xs - {x}" by (induct xs) auto lemma length_remove1: "length(remove1 x xs) = (if x \ set xs then length xs - 1 else length xs)" by (induct xs) (auto dest!:length_pos_if_in_set) lemma remove1_filter_not[simp]: "\ P x \ remove1 x (filter P xs) = filter P xs" by(induct xs) auto lemma filter_remove1: "filter Q (remove1 x xs) = remove1 x (filter Q xs)" by (induct xs) auto lemma notin_set_remove1[simp]: "x \ set xs \ x \ set(remove1 y xs)" by(insert set_remove1_subset) fast lemma distinct_remove1[simp]: "distinct xs \ distinct(remove1 x xs)" by (induct xs) simp_all lemma remove1_remdups: "distinct xs \ remove1 x (remdups xs) = remdups (remove1 x xs)" by (induct xs) simp_all lemma remove1_idem: "x \ set xs \ remove1 x xs = xs" by (induct xs) simp_all lemma remove1_split: "a \ set xs \ remove1 a xs = ys \ (\ls rs. xs = ls @ a # rs \ a \ set ls \ ys = ls @ rs)" by (metis remove1.simps(2) remove1_append split_list_first) subsubsection \\<^const>\removeAll\\ lemma removeAll_filter_not_eq: "removeAll x = filter (\y. x \ y)" proof fix xs show "removeAll x xs = filter (\y. x \ y) xs" by (induct xs) auto qed lemma removeAll_append[simp]: "removeAll x (xs @ ys) = removeAll x xs @ removeAll x ys" by (induct xs) auto lemma set_removeAll[simp]: "set(removeAll x xs) = set xs - {x}" by (induct xs) auto lemma removeAll_id[simp]: "x \ set xs \ removeAll x xs = xs" by (induct xs) auto (* Needs count:: 'a \ 'a list \ nat lemma length_removeAll: "length(removeAll x xs) = length xs - count x xs" *) lemma removeAll_filter_not[simp]: "\ P x \ removeAll x (filter P xs) = filter P xs" by(induct xs) auto lemma distinct_removeAll: "distinct xs \ distinct (removeAll x xs)" by (simp add: removeAll_filter_not_eq) lemma distinct_remove1_removeAll: "distinct xs \ remove1 x xs = removeAll x xs" by (induct xs) simp_all lemma map_removeAll_inj_on: "inj_on f (insert x (set xs)) \ map f (removeAll x xs) = removeAll (f x) (map f xs)" by (induct xs) (simp_all add:inj_on_def) lemma map_removeAll_inj: "inj f \ map f (removeAll x xs) = removeAll (f x) (map f xs)" by (rule map_removeAll_inj_on, erule subset_inj_on, rule subset_UNIV) lemma length_removeAll_less_eq [simp]: "length (removeAll x xs) \ length xs" by (simp add: removeAll_filter_not_eq) lemma length_removeAll_less [termination_simp]: "x \ set xs \ length (removeAll x xs) < length xs" by (auto dest: length_filter_less simp add: removeAll_filter_not_eq) lemma distinct_concat_iff: "distinct (concat xs) \ distinct (removeAll [] xs) \ (\ys. ys \ set xs \ distinct ys) \ (\ys zs. 
ys \ set xs \ zs \ set xs \ ys \ zs \ set ys \ set zs = {})" apply (induct xs) apply(simp_all, safe, auto) by (metis Int_iff UN_I empty_iff equals0I set_empty) subsubsection \\<^const>\replicate\\ lemma length_replicate [simp]: "length (replicate n x) = n" by (induct n) auto lemma replicate_eqI: assumes "length xs = n" and "\y. y \ set xs \ y = x" shows "xs = replicate n x" using assms proof (induct xs arbitrary: n) case Nil then show ?case by simp next case (Cons x xs) then show ?case by (cases n) simp_all qed lemma Ex_list_of_length: "\xs. length xs = n" by (rule exI[of _ "replicate n undefined"]) simp lemma map_replicate [simp]: "map f (replicate n x) = replicate n (f x)" by (induct n) auto lemma map_replicate_const: "map (\ x. k) lst = replicate (length lst) k" by (induct lst) auto lemma replicate_app_Cons_same: "(replicate n x) @ (x # xs) = x # replicate n x @ xs" by (induct n) auto lemma rev_replicate [simp]: "rev (replicate n x) = replicate n x" by (induct n) (auto simp: replicate_app_Cons_same) lemma replicate_add: "replicate (n + m) x = replicate n x @ replicate m x" by (induct n) auto text\Courtesy of Matthias Daum:\ lemma append_replicate_commute: "replicate n x @ replicate k x = replicate k x @ replicate n x" by (metis add.commute replicate_add) text\Courtesy of Andreas Lochbihler:\ lemma filter_replicate: "filter P (replicate n x) = (if P x then replicate n x else [])" by(induct n) auto lemma hd_replicate [simp]: "n \ 0 \ hd (replicate n x) = x" by (induct n) auto lemma tl_replicate [simp]: "tl (replicate n x) = replicate (n - 1) x" by (induct n) auto lemma last_replicate [simp]: "n \ 0 \ last (replicate n x) = x" by (atomize (full), induct n) auto lemma nth_replicate[simp]: "i < n \ (replicate n x)!i = x" by (induct n arbitrary: i)(auto simp: nth_Cons split: nat.split) text\Courtesy of Matthias Daum (2 lemmas):\ lemma take_replicate[simp]: "take i (replicate k x) = replicate (min i k) x" proof (cases "k \ i") case True then show ?thesis by (simp add: min_def) next case False then have "replicate k x = replicate i x @ replicate (k - i) x" by (simp add: replicate_add [symmetric]) then show ?thesis by (simp add: min_def) qed lemma drop_replicate[simp]: "drop i (replicate k x) = replicate (k-i) x" proof (induct k arbitrary: i) case (Suc k) then show ?case by (simp add: drop_Cons') qed simp lemma set_replicate_Suc: "set (replicate (Suc n) x) = {x}" by (induct n) auto lemma set_replicate [simp]: "n \ 0 \ set (replicate n x) = {x}" by (fast dest!: not0_implies_Suc intro!: set_replicate_Suc) lemma set_replicate_conv_if: "set (replicate n x) = (if n = 0 then {} else {x})" by auto lemma in_set_replicate[simp]: "(x \ set (replicate n y)) = (x = y \ n \ 0)" by (simp add: set_replicate_conv_if) lemma Ball_set_replicate[simp]: "(\x \ set(replicate n a). P x) = (P a \ n=0)" by(simp add: set_replicate_conv_if) lemma Bex_set_replicate[simp]: "(\x \ set(replicate n a). P x) = (P a \ n\0)" by(simp add: set_replicate_conv_if) lemma replicate_append_same: "replicate i x @ [x] = x # replicate i x" by (induct i) simp_all lemma map_replicate_trivial: "map (\i. x) [0.. 
n=0" by (induct n) auto lemmas empty_replicate[simp] = replicate_empty[THEN eq_iff_swap] lemma replicate_eq_replicate[simp]: "(replicate m x = replicate n y) \ (m=n \ (m\0 \ x=y))" proof (induct m arbitrary: n) case (Suc m n) then show ?case by (induct n) auto qed simp lemma takeWhile_replicate[simp]: "takeWhile P (replicate n x) = (if P x then replicate n x else [])" using takeWhile_eq_Nil_iff by fastforce lemma dropWhile_replicate[simp]: "dropWhile P (replicate n x) = (if P x then [] else replicate n x)" using dropWhile_eq_self_iff by fastforce lemma replicate_length_filter: "replicate (length (filter (\y. x = y) xs)) x = filter (\y. x = y) xs" by (induct xs) auto lemma comm_append_are_replicate: "xs @ ys = ys @ xs \ \m n zs. concat (replicate m zs) = xs \ concat (replicate n zs) = ys" proof (induction "length (xs @ ys) + length xs" arbitrary: xs ys rule: less_induct) case less consider (1) "length ys < length xs" | (2) "xs = []" | (3) "length xs \ length ys \ xs \ []" by linarith then show ?case proof (cases) case 1 then show ?thesis using less.hyps[OF _ less.prems[symmetric]] nat_add_left_cancel_less by auto next case 2 then have "concat (replicate 0 ys) = xs \ concat (replicate 1 ys) = ys" by simp then show ?thesis by blast next case 3 then have "length xs \ length ys" and "xs \ []" by blast+ from \length xs \ length ys\ and \xs @ ys = ys @ xs\ obtain ws where "ys = xs @ ws" by (auto simp: append_eq_append_conv2) from this and \xs \ []\ have "length ws < length ys" by simp from \xs @ ys = ys @ xs\[unfolded \ys = xs @ ws\] have "xs @ ws = ws @ xs" by simp from less.hyps[OF _ this] \length ws < length ys\ obtain m n' zs where "concat (replicate m zs) = xs" and "concat (replicate n' zs) = ws" by auto then have "concat (replicate (m+n') zs) = ys" using \ys = xs @ ws\ by (simp add: replicate_add) then show ?thesis using \concat (replicate m zs) = xs\ by blast qed qed lemma comm_append_is_replicate: fixes xs ys :: "'a list" assumes "xs \ []" "ys \ []" assumes "xs @ ys = ys @ xs" shows "\n zs. n > 1 \ concat (replicate n zs) = xs @ ys" proof - obtain m n zs where "concat (replicate m zs) = xs" and "concat (replicate n zs) = ys" using comm_append_are_replicate[OF assms(3)] by blast then have "m + n > 1" and "concat (replicate (m+n) zs) = xs @ ys" using \xs \ []\ and \ys \ []\ by (auto simp: replicate_add) then show ?thesis by blast qed lemma Cons_replicate_eq: "x # xs = replicate n y \ x = y \ n > 0 \ xs = replicate (n - 1) x" by (induct n) auto lemma replicate_length_same: "(\y\set xs. y = x) \ replicate (length xs) x = xs" by (induct xs) simp_all lemma foldr_replicate [simp]: "foldr f (replicate n x) = f x ^^ n" by (induct n) (simp_all) lemma fold_replicate [simp]: "fold f (replicate n x) = f x ^^ n" by (subst foldr_fold [symmetric]) simp_all subsubsection \\<^const>\enumerate\\ lemma enumerate_simps [simp, code]: "enumerate n [] = []" "enumerate n (x # xs) = (n, x) # enumerate (Suc n) xs" by (simp_all add: enumerate_eq_zip upt_rec) lemma length_enumerate [simp]: "length (enumerate n xs) = length xs" by (simp add: enumerate_eq_zip) lemma map_fst_enumerate [simp]: "map fst (enumerate n xs) = [n.. set (enumerate n xs) \ n \ fst p \ fst p < length xs + n \ nth xs (fst p - n) = snd p" proof - { fix m assume "n \ m" moreover assume "m < length xs + n" ultimately have "[n.. xs ! (m - n) = xs ! (m - n) \ m - n < length xs" by auto then have "\q. [n.. xs ! q = xs ! (m - n) \ q < length xs" .. 
} then show ?thesis by (cases p) (auto simp add: enumerate_eq_zip in_set_zip) qed lemma nth_enumerate_eq: "m < length xs \ enumerate n xs ! m = (n + m, xs ! m)" by (simp add: enumerate_eq_zip) lemma enumerate_replicate_eq: "enumerate n (replicate m a) = map (\q. (q, a)) [n..k. (k, f k)) [n.. m") (simp_all add: zip_map2 zip_same_conv_map enumerate_eq_zip) subsubsection \\<^const>\rotate1\ and \<^const>\rotate\\ lemma rotate0[simp]: "rotate 0 = id" by(simp add:rotate_def) lemma rotate_Suc[simp]: "rotate (Suc n) xs = rotate1(rotate n xs)" by(simp add:rotate_def) lemma rotate_add: "rotate (m+n) = rotate m \ rotate n" by(simp add:rotate_def funpow_add) lemma rotate_rotate: "rotate m (rotate n xs) = rotate (m+n) xs" by(simp add:rotate_add) lemma rotate1_map: "rotate1 (map f xs) = map f (rotate1 xs)" by(cases xs) simp_all lemma rotate1_rotate_swap: "rotate1 (rotate n xs) = rotate n (rotate1 xs)" by(simp add:rotate_def funpow_swap1) lemma rotate1_length01[simp]: "length xs \ 1 \ rotate1 xs = xs" by(cases xs) simp_all lemma rotate_length01[simp]: "length xs \ 1 \ rotate n xs = xs" by (induct n) (simp_all add:rotate_def) lemma rotate1_hd_tl: "xs \ [] \ rotate1 xs = tl xs @ [hd xs]" by (cases xs) simp_all lemma rotate_drop_take: "rotate n xs = drop (n mod length xs) xs @ take (n mod length xs) xs" proof (induct n) case (Suc n) show ?case proof (cases "xs = []") case False then show ?thesis proof (cases "n mod length xs = 0") case True then show ?thesis by (auto simp add: mod_Suc False Suc.hyps drop_Suc rotate1_hd_tl take_Suc Suc_length_conv) next case False with \xs \ []\ Suc show ?thesis by (simp add: rotate_def mod_Suc rotate1_hd_tl drop_Suc[symmetric] drop_tl[symmetric] take_hd_drop linorder_not_le) qed qed simp qed simp lemma rotate_conv_mod: "rotate n xs = rotate (n mod length xs) xs" by(simp add:rotate_drop_take) lemma rotate_id[simp]: "n mod length xs = 0 \ rotate n xs = xs" by(simp add:rotate_drop_take) lemma length_rotate1[simp]: "length(rotate1 xs) = length xs" by (cases xs) simp_all lemma length_rotate[simp]: "length(rotate n xs) = length xs" by (induct n arbitrary: xs) (simp_all add:rotate_def) lemma distinct1_rotate[simp]: "distinct(rotate1 xs) = distinct xs" by (cases xs) auto lemma distinct_rotate[simp]: "distinct(rotate n xs) = distinct xs" by (induct n) (simp_all add:rotate_def) lemma rotate_map: "rotate n (map f xs) = map f (rotate n xs)" by(simp add:rotate_drop_take take_map drop_map) lemma set_rotate1[simp]: "set(rotate1 xs) = set xs" by (cases xs) auto lemma set_rotate[simp]: "set(rotate n xs) = set xs" by (induct n) (simp_all add:rotate_def) lemma rotate1_is_Nil_conv[simp]: "(rotate1 xs = []) = (xs = [])" by (cases xs) auto lemma rotate_is_Nil_conv[simp]: "(rotate n xs = []) = (xs = [])" by (induct n) (simp_all add:rotate_def) lemma rotate_rev: "rotate n (rev xs) = rev(rotate (length xs - (n mod length xs)) xs)" proof (cases "length xs = 0 \ n mod length xs = 0") case False then show ?thesis by(simp add:rotate_drop_take rev_drop rev_take) qed force lemma hd_rotate_conv_nth: assumes "xs \ []" shows "hd(rotate n xs) = xs!(n mod length xs)" proof - have "n mod length xs < length xs" using assms by simp then show ?thesis by (metis drop_eq_Nil hd_append2 hd_drop_conv_nth leD rotate_drop_take) qed lemma rotate_append: "rotate (length l) (l @ q) = q @ l" by (induct l arbitrary: q) (auto simp add: rotate1_rotate_swap) lemma nth_rotate: \rotate m xs ! n = xs ! 
((m + n) mod length xs)\ if \n < length xs\ using that apply (auto simp add: rotate_drop_take nth_append not_less less_diff_conv ac_simps dest!: le_Suc_ex) apply (metis add.commute mod_add_right_eq mod_less) apply (metis (no_types, lifting) Nat.diff_diff_right add.commute add_diff_cancel_right' diff_le_self dual_order.strict_trans2 length_greater_0_conv less_nat_zero_code list.size(3) mod_add_right_eq mod_add_self2 mod_le_divisor mod_less) done lemma nth_rotate1: \rotate1 xs ! n = xs ! (Suc n mod length xs)\ if \n < length xs\ using that nth_rotate [of n xs 1] by simp subsubsection \\<^const>\nths\ --- a generalization of \<^const>\nth\ to sets\ lemma nths_empty [simp]: "nths xs {} = []" by (auto simp add: nths_def) lemma nths_nil [simp]: "nths [] A = []" by (auto simp add: nths_def) lemma nths_all: "\i < length xs. i \ I \ nths xs I = xs" apply (simp add: nths_def) apply (subst filter_True) apply (auto simp: in_set_zip subset_iff) done lemma length_nths: "length (nths xs I) = card{i. i < length xs \ i \ I}" by(simp add: nths_def length_filter_conv_card cong:conj_cong) lemma nths_shift_lemma_Suc: "map fst (filter (\p. P(Suc(snd p))) (zip xs is)) = map fst (filter (\p. P(snd p)) (zip xs (map Suc is)))" proof (induct xs arbitrary: "is") case (Cons x xs "is") show ?case by (cases "is") (auto simp add: Cons.hyps) qed simp lemma nths_shift_lemma: "map fst (filter (\p. snd p \ A) (zip xs [i..p. snd p + i \ A) (zip xs [0.. A}" unfolding nths_def proof (induct l' rule: rev_induct) case (snoc x xs) then show ?case by (simp add: upt_add_eq_append[of 0] nths_shift_lemma add.commute) qed auto lemma nths_Cons: "nths (x # l) A = (if 0 \ A then [x] else []) @ nths l {j. Suc j \ A}" proof (induct l rule: rev_induct) case (snoc x xs) then show ?case by (simp flip: append_Cons add: nths_append) qed (auto simp: nths_def) lemma nths_map: "nths (map f xs) I = map f (nths xs I)" by(induction xs arbitrary: I) (simp_all add: nths_Cons) lemma set_nths: "set(nths xs I) = {xs!i|i. i i \ I}" by (induct xs arbitrary: I) (auto simp: nths_Cons nth_Cons split:nat.split dest!: gr0_implies_Suc) lemma set_nths_subset: "set(nths xs I) \ set xs" by(auto simp add:set_nths) lemma notin_set_nthsI[simp]: "x \ set xs \ x \ set(nths xs I)" by(auto simp add:set_nths) lemma in_set_nthsD: "x \ set(nths xs I) \ x \ set xs" by(auto simp add:set_nths) lemma nths_singleton [simp]: "nths [x] A = (if 0 \ A then [x] else [])" by (simp add: nths_Cons) lemma distinct_nthsI[simp]: "distinct xs \ distinct (nths xs I)" by (induct xs arbitrary: I) (auto simp: nths_Cons) lemma nths_upt_eq_take [simp]: "nths l {.. A. \j \ B. card {i' \ A. i' < i} = j}" by (induction xs arbitrary: A B) (auto simp add: nths_Cons card_less_Suc card_less_Suc2) lemma drop_eq_nths: "drop n xs = nths xs {i. i \ n}" by (induction xs arbitrary: n) (auto simp add: nths_Cons nths_all drop_Cons' intro: arg_cong2[where f=nths, OF refl]) lemma nths_drop: "nths (drop n xs) I = nths xs ((+) n ` I)" by(force simp: drop_eq_nths nths_nths simp flip: atLeastLessThan_iff intro: arg_cong2[where f=nths, OF refl]) lemma filter_eq_nths: "filter P xs = nths xs {i. i P(xs!i)}" by(induction xs) (auto simp: nths_Cons) lemma filter_in_nths: "distinct xs \ filter (%x. x \ set (nths xs s)) xs = nths xs s" proof (induct xs arbitrary: s) case Nil thus ?case by simp next case (Cons a xs) then have "\x. 
x \ set xs \ x \ a" by auto with Cons show ?case by(simp add: nths_Cons cong:filter_cong) qed subsubsection \\<^const>\subseqs\ and \<^const>\List.n_lists\\ lemma length_subseqs: "length (subseqs xs) = 2 ^ length xs" by (induct xs) (simp_all add: Let_def) lemma subseqs_powset: "set ` set (subseqs xs) = Pow (set xs)" proof - have aux: "\x A. set ` Cons x ` A = insert x ` set ` A" by (auto simp add: image_def) have "set (map set (subseqs xs)) = Pow (set xs)" by (induct xs) (simp_all add: aux Let_def Pow_insert Un_commute comp_def del: map_map) then show ?thesis by simp qed lemma distinct_set_subseqs: assumes "distinct xs" shows "distinct (map set (subseqs xs))" proof (rule card_distinct) have "finite (set xs)" .. then have "card (Pow (set xs)) = 2 ^ card (set xs)" by (rule card_Pow) with assms distinct_card [of xs] have "card (Pow (set xs)) = 2 ^ length xs" by simp then show "card (set (map set (subseqs xs))) = length (map set (subseqs xs))" by (simp add: subseqs_powset length_subseqs) qed lemma n_lists_Nil [simp]: "List.n_lists n [] = (if n = 0 then [[]] else [])" by (induct n) simp_all lemma length_n_lists_elem: "ys \ set (List.n_lists n xs) \ length ys = n" by (induct n arbitrary: ys) auto lemma set_n_lists: "set (List.n_lists n xs) = {ys. length ys = n \ set ys \ set xs}" proof (rule set_eqI) fix ys :: "'a list" show "ys \ set (List.n_lists n xs) \ ys \ {ys. length ys = n \ set ys \ set xs}" proof - have "ys \ set (List.n_lists n xs) \ length ys = n" by (induct n arbitrary: ys) auto moreover have "\x. ys \ set (List.n_lists n xs) \ x \ set ys \ x \ set xs" by (induct n arbitrary: ys) auto moreover have "set ys \ set xs \ ys \ set (List.n_lists (length ys) xs)" by (induct ys) auto ultimately show ?thesis by auto qed qed lemma subseqs_refl: "xs \ set (subseqs xs)" by (induct xs) (simp_all add: Let_def) lemma subset_subseqs: "X \ set xs \ X \ set ` set (subseqs xs)" unfolding subseqs_powset by simp lemma Cons_in_subseqsD: "y # ys \ set (subseqs xs) \ ys \ set (subseqs xs)" by (induct xs) (auto simp: Let_def) lemma subseqs_distinctD: "\ ys \ set (subseqs xs); distinct xs \ \ distinct ys" proof (induct xs arbitrary: ys) case (Cons x xs ys) then show ?case by (auto simp: Let_def) (metis Pow_iff contra_subsetD image_eqI subseqs_powset) qed simp subsubsection \\<^const>\splice\\ lemma splice_Nil2 [simp]: "splice xs [] = xs" by (cases xs) simp_all lemma length_splice[simp]: "length(splice xs ys) = length xs + length ys" by (induct xs ys rule: splice.induct) auto lemma split_Nil_iff[simp]: "splice xs ys = [] \ xs = [] \ ys = []" by (induct xs ys rule: splice.induct) auto lemma splice_replicate[simp]: "splice (replicate m x) (replicate n x) = replicate (m+n) x" proof (induction "replicate m x" "replicate n x" arbitrary: m n rule: splice.induct) case (2 x xs) then show ?case by (auto simp add: Cons_replicate_eq dest: gr0_implies_Suc) qed auto subsubsection \\<^const>\shuffles\\ lemma shuffles_commutes: "shuffles xs ys = shuffles ys xs" by (induction xs ys rule: shuffles.induct) (simp_all add: Un_commute) lemma Nil_in_shuffles[simp]: "[] \ shuffles xs ys \ xs = [] \ ys = []" by (induct xs ys rule: shuffles.induct) auto lemma shufflesE: "zs \ shuffles xs ys \ (zs = xs \ ys = [] \ P) \ (zs = ys \ xs = [] \ P) \ (\x xs' z zs'. xs = x # xs' \ zs = z # zs' \ x = z \ zs' \ shuffles xs' ys \ P) \ (\y ys' z zs'. 
ys = y # ys' \ zs = z # zs' \ y = z \ zs' \ shuffles xs ys' \ P) \ P" by (induct xs ys rule: shuffles.induct) auto lemma Cons_in_shuffles_iff: "z # zs \ shuffles xs ys \ (xs \ [] \ hd xs = z \ zs \ shuffles (tl xs) ys \ ys \ [] \ hd ys = z \ zs \ shuffles xs (tl ys))" by (induct xs ys rule: shuffles.induct) auto lemma splice_in_shuffles [simp, intro]: "splice xs ys \ shuffles xs ys" by (induction xs ys rule: splice.induct) (simp_all add: Cons_in_shuffles_iff shuffles_commutes) lemma Nil_in_shufflesI: "xs = [] \ ys = [] \ [] \ shuffles xs ys" by simp lemma Cons_in_shuffles_leftI: "zs \ shuffles xs ys \ z # zs \ shuffles (z # xs) ys" by (cases ys) auto lemma Cons_in_shuffles_rightI: "zs \ shuffles xs ys \ z # zs \ shuffles xs (z # ys)" by (cases xs) auto lemma finite_shuffles [simp, intro]: "finite (shuffles xs ys)" by (induction xs ys rule: shuffles.induct) simp_all lemma length_shuffles: "zs \ shuffles xs ys \ length zs = length xs + length ys" by (induction xs ys arbitrary: zs rule: shuffles.induct) auto lemma set_shuffles: "zs \ shuffles xs ys \ set zs = set xs \ set ys" by (induction xs ys arbitrary: zs rule: shuffles.induct) auto lemma distinct_disjoint_shuffles: assumes "distinct xs" "distinct ys" "set xs \ set ys = {}" "zs \ shuffles xs ys" shows "distinct zs" using assms proof (induction xs ys arbitrary: zs rule: shuffles.induct) case (3 x xs y ys) show ?case proof (cases zs) case (Cons z zs') with "3.prems" and "3.IH"[of zs'] show ?thesis by (force dest: set_shuffles) qed simp_all qed simp_all lemma Cons_shuffles_subset1: "(#) x ` shuffles xs ys \ shuffles (x # xs) ys" by (cases ys) auto lemma Cons_shuffles_subset2: "(#) y ` shuffles xs ys \ shuffles xs (y # ys)" by (cases xs) auto lemma filter_shuffles: "filter P ` shuffles xs ys = shuffles (filter P xs) (filter P ys)" proof - have *: "filter P ` (#) x ` A = (if P x then (#) x ` filter P ` A else filter P ` A)" for x A by (auto simp: image_image) show ?thesis by (induction xs ys rule: shuffles.induct) (simp_all split: if_splits add: image_Un * Un_absorb1 Un_absorb2 Cons_shuffles_subset1 Cons_shuffles_subset2) qed lemma filter_shuffles_disjoint1: assumes "set xs \ set ys = {}" "zs \ shuffles xs ys" shows "filter (\x. x \ set xs) zs = xs" (is "filter ?P _ = _") and "filter (\x. x \ set xs) zs = ys" (is "filter ?Q _ = _") using assms proof - from assms have "filter ?P zs \ filter ?P ` shuffles xs ys" by blast also have "filter ?P ` shuffles xs ys = shuffles (filter ?P xs) (filter ?P ys)" by (rule filter_shuffles) also have "filter ?P xs = xs" by (rule filter_True) simp_all also have "filter ?P ys = []" by (rule filter_False) (insert assms(1), auto) also have "shuffles xs [] = {xs}" by simp finally show "filter ?P zs = xs" by simp next from assms have "filter ?Q zs \ filter ?Q ` shuffles xs ys" by blast also have "filter ?Q ` shuffles xs ys = shuffles (filter ?Q xs) (filter ?Q ys)" by (rule filter_shuffles) also have "filter ?Q ys = ys" by (rule filter_True) (insert assms(1), auto) also have "filter ?Q xs = []" by (rule filter_False) (insert assms(1), auto) also have "shuffles [] ys = {ys}" by simp finally show "filter ?Q zs = ys" by simp qed lemma filter_shuffles_disjoint2: assumes "set xs \ set ys = {}" "zs \ shuffles xs ys" shows "filter (\x. x \ set ys) zs = ys" "filter (\x. x \ set ys) zs = xs" using filter_shuffles_disjoint1[of ys xs zs] assms by (simp_all add: shuffles_commutes Int_commute) lemma partition_in_shuffles: "xs \ shuffles (filter P xs) (filter (\x. 
\P x) xs)" proof (induction xs) case (Cons x xs) show ?case proof (cases "P x") case True hence "x # xs \ (#) x ` shuffles (filter P xs) (filter (\x. \P x) xs)" by (intro imageI Cons.IH) also have "\ \ shuffles (filter P (x # xs)) (filter (\x. \P x) (x # xs))" by (simp add: True Cons_shuffles_subset1) finally show ?thesis . next case False hence "x # xs \ (#) x ` shuffles (filter P xs) (filter (\x. \P x) xs)" by (intro imageI Cons.IH) also have "\ \ shuffles (filter P (x # xs)) (filter (\x. \P x) (x # xs))" by (simp add: False Cons_shuffles_subset2) finally show ?thesis . qed qed auto lemma inv_image_partition: assumes "\x. x \ set xs \ P x" "\y. y \ set ys \ \P y" shows "partition P -` {(xs, ys)} = shuffles xs ys" proof (intro equalityI subsetI) fix zs assume zs: "zs \ shuffles xs ys" hence [simp]: "set zs = set xs \ set ys" by (rule set_shuffles) from assms have "filter P zs = filter (\x. x \ set xs) zs" "filter (\x. \P x) zs = filter (\x. x \ set ys) zs" by (intro filter_cong refl; force)+ moreover from assms have "set xs \ set ys = {}" by auto ultimately show "zs \ partition P -` {(xs, ys)}" using zs by (simp add: o_def filter_shuffles_disjoint1 filter_shuffles_disjoint2) next fix zs assume "zs \ partition P -` {(xs, ys)}" thus "zs \ shuffles xs ys" using partition_in_shuffles[of zs] by (auto simp: o_def) qed subsubsection \Transpose\ function transpose where "transpose [] = []" | "transpose ([] # xss) = transpose xss" | "transpose ((x#xs) # xss) = (x # [h. (h#t) \ xss]) # transpose (xs # [t. (h#t) \ xss])" by pat_completeness auto lemma transpose_aux_filter_head: "concat (map (case_list [] (\h t. [h])) xss) = map (\xs. hd xs) (filter (\ys. ys \ []) xss)" by (induct xss) (auto split: list.split) lemma transpose_aux_filter_tail: "concat (map (case_list [] (\h t. [t])) xss) = map (\xs. tl xs) (filter (\ys. ys \ []) xss)" by (induct xss) (auto split: list.split) lemma transpose_aux_max: "max (Suc (length xs)) (foldr (\xs. max (length xs)) xss 0) = Suc (max (length xs) (foldr (\x. max (length x - Suc 0)) (filter (\ys. ys \ []) xss) 0))" (is "max _ ?foldB = Suc (max _ ?foldA)") proof (cases "(filter (\ys. ys \ []) xss) = []") case True hence "foldr (\xs. max (length xs)) xss 0 = 0" proof (induct xss) case (Cons x xs) then have "x = []" by (cases x) auto with Cons show ?case by auto qed simp thus ?thesis using True by simp next case False have foldA: "?foldA = foldr (\x. max (length x)) (filter (\ys. ys \ []) xss) 0 - 1" by (induct xss) auto have foldB: "?foldB = foldr (\x. max (length x)) (filter (\ys. ys \ []) xss) 0" by (induct xss) auto have "0 < ?foldB" proof - from False obtain z zs where zs: "(filter (\ys. ys \ []) xss) = z#zs" by (auto simp: neq_Nil_conv) hence "z \ set (filter (\ys. ys \ []) xss)" by auto hence "z \ []" by auto thus ?thesis unfolding foldB zs by (auto simp: max_def intro: less_le_trans) qed thus ?thesis unfolding foldA foldB max_Suc_Suc[symmetric] by simp qed termination transpose by (relation "measure (\xs. foldr (\xs. max (length xs)) xs 0 + length xs)") (auto simp: transpose_aux_filter_tail foldr_map comp_def transpose_aux_max less_Suc_eq_le) lemma transpose_empty: "(transpose xs = []) \ (\x \ set xs. x = [])" by (induct rule: transpose.induct) simp_all lemma length_transpose: fixes xs :: "'a list list" shows "length (transpose xs) = foldr (\xs. 
max (length xs)) xs 0" by (induct rule: transpose.induct) (auto simp: transpose_aux_filter_tail foldr_map comp_def transpose_aux_max max_Suc_Suc[symmetric] simp del: max_Suc_Suc) lemma nth_transpose: fixes xs :: "'a list list" assumes "i < length (transpose xs)" shows "transpose xs ! i = map (\xs. xs ! i) (filter (\ys. i < length ys) xs)" using assms proof (induct arbitrary: i rule: transpose.induct) case (3 x xs xss) define XS where "XS = (x # xs) # xss" hence [simp]: "XS \ []" by auto thus ?case proof (cases i) case 0 thus ?thesis by (simp add: transpose_aux_filter_head hd_conv_nth) next case (Suc j) have *: "\xss. xs # map tl xss = map tl ((x#xs)#xss)" by simp have **: "\xss. (x#xs) # filter (\ys. ys \ []) xss = filter (\ys. ys \ []) ((x#xs)#xss)" by simp { fix x have "Suc j < length x \ x \ [] \ j < length x - Suc 0" by (cases x) simp_all } note *** = this have j_less: "j < length (transpose (xs # concat (map (case_list [] (\h t. [t])) xss)))" using "3.prems" by (simp add: transpose_aux_filter_tail length_transpose Suc) show ?thesis unfolding transpose.simps \i = Suc j\ nth_Cons_Suc "3.hyps"[OF j_less] apply (auto simp: transpose_aux_filter_tail filter_map comp_def length_transpose * ** *** XS_def[symmetric]) by (simp add: nth_tl) qed qed simp_all lemma transpose_map_map: "transpose (map (map f) xs) = map (map f) (transpose xs)" proof (rule nth_equalityI) have [simp]: "length (transpose (map (map f) xs)) = length (transpose xs)" by (simp add: length_transpose foldr_map comp_def) show "length (transpose (map (map f) xs)) = length (map (map f) (transpose xs))" by simp fix i assume "i < length (transpose (map (map f) xs))" thus "transpose (map (map f) xs) ! i = map (map f) (transpose xs) ! i" by (simp add: nth_transpose filter_map comp_def) qed subsubsection \\<^const>\min\ and \<^const>\arg_min\\ lemma min_list_Min: "xs \ [] \ min_list xs = Min (set xs)" by (induction xs rule: induct_list012)(auto) lemma f_arg_min_list_f: "xs \ [] \ f (arg_min_list f xs) = Min (f ` (set xs))" by(induction f xs rule: arg_min_list.induct) (auto simp: min_def intro!: antisym) lemma arg_min_list_in: "xs \ [] \ arg_min_list f xs \ set xs" by(induction xs rule: induct_list012) (auto simp: Let_def) subsubsection \(In)finiteness\ lemma finite_maxlen: "finite (M::'a list set) \ \n. \s\M. size s < n" proof (induct rule: finite.induct) case emptyI show ?case by simp next case (insertI M xs) then obtain n where "\s\M. length s < n" by blast hence "\s\insert xs M. size s < max n (size xs) + 1" by auto thus ?case .. qed lemma lists_length_Suc_eq: "{xs. set xs \ A \ length xs = Suc n} = (\(xs, n). n#xs) ` ({xs. set xs \ A \ length xs = n} \ A)" by (auto simp: length_Suc_conv) lemma assumes "finite A" shows finite_lists_length_eq: "finite {xs. set xs \ A \ length xs = n}" and card_lists_length_eq: "card {xs. set xs \ A \ length xs = n} = (card A)^n" using \finite A\ by (induct n) (auto simp: card_image inj_split_Cons lists_length_Suc_eq cong: conj_cong) lemma finite_lists_length_le: assumes "finite A" shows "finite {xs. set xs \ A \ length xs \ n}" (is "finite ?S") proof- have "?S = (\n\{0..n}. {xs. set xs \ A \ length xs = n})" by auto thus ?thesis by (auto intro!: finite_lists_length_eq[OF \finite A\] simp only:) qed lemma card_lists_length_le: assumes "finite A" shows "card {xs. set xs \ A \ length xs \ n} = (\i\n. card A^i)" proof - have "(\i\n. card A^i) = card (\i\n. {xs. 
set xs \ A \ length xs = i})" using \finite A\ by (subst card_UN_disjoint) (auto simp add: card_lists_length_eq finite_lists_length_eq) also have "(\i\n. {xs. set xs \ A \ length xs = i}) = {xs. set xs \ A \ length xs \ n}" by auto finally show ?thesis by simp qed lemma finite_lists_distinct_length_eq [intro]: assumes "finite A" shows "finite {xs. length xs = n \ distinct xs \ set xs \ A}" (is "finite ?S") proof - have "finite {xs. set xs \ A \ length xs = n}" using \finite A\ by (rule finite_lists_length_eq) moreover have "?S \ {xs. set xs \ A \ length xs = n}" by auto ultimately show ?thesis using finite_subset by auto qed lemma card_lists_distinct_length_eq: assumes "finite A" "k \ card A" shows "card {xs. length xs = k \ distinct xs \ set xs \ A} = \{card A - k + 1 .. card A}" using assms proof (induct k) case 0 then have "{xs. length xs = 0 \ distinct xs \ set xs \ A} = {[]}" by auto then show ?case by simp next case (Suc k) let "?k_list" = "\k xs. length xs = k \ distinct xs \ set xs \ A" have inj_Cons: "\A. inj_on (\(xs, n). n # xs) A" by (rule inj_onI) auto from Suc have "k \ card A" by simp moreover note \finite A\ moreover have "finite {xs. ?k_list k xs}" by (rule finite_subset) (use finite_lists_length_eq[OF \finite A\, of k] in auto) moreover have "\i j. i \ j \ {i} \ (A - set i) \ {j} \ (A - set j) = {}" by auto moreover have "\i. i \ {xs. ?k_list k xs} \ card (A - set i) = card A - k" by (simp add: card_Diff_subset distinct_card) moreover have "{xs. ?k_list (Suc k) xs} = (\(xs, n). n#xs) ` \((\xs. {xs} \ (A - set xs)) ` {xs. ?k_list k xs})" by (auto simp: length_Suc_conv) moreover have "Suc (card A - Suc k) = card A - k" using Suc.prems by simp then have "(card A - k) * \{Suc (card A - k)..card A} = \{Suc (card A - Suc k)..card A}" by (subst prod.insert[symmetric]) (simp add: atLeastAtMost_insertL)+ ultimately show ?case by (simp add: card_image inj_Cons card_UN_disjoint Suc.hyps algebra_simps) qed lemma card_lists_distinct_length_eq': assumes "k < card A" shows "card {xs. length xs = k \ distinct xs \ set xs \ A} = \{card A - k + 1 .. card A}" proof - from \k < card A\ have "finite A" and "k \ card A" using card.infinite by force+ from this show ?thesis by (rule card_lists_distinct_length_eq) qed lemma infinite_UNIV_listI: "\ finite(UNIV::'a list set)" by (metis UNIV_I finite_maxlen length_replicate less_irrefl) lemma same_length_different: assumes "xs \ ys" and "length xs = length ys" shows "\pre x xs' y ys'. x\y \ xs = pre @ [x] @ xs' \ ys = pre @ [y] @ ys'" using assms proof (induction xs arbitrary: ys) case Nil then show ?case by auto next case (Cons x xs) then obtain z zs where ys: "ys = Cons z zs" by (metis length_Suc_conv) show ?case proof (cases "x=z") case True then have "xs \ zs" "length xs = length zs" using Cons.prems ys by auto then obtain pre u xs' v ys' where "u\v" and xs: "xs = pre @ [u] @ xs'" and zs: "zs = pre @ [v] @ys'" using Cons.IH by meson then have "x # xs = (z#pre) @ [u] @ xs' \ ys = (z#pre) @ [v] @ ys'" by (simp add: True ys) with \u\v\ show ?thesis by blast next case False then have "x # xs = [] @ [x] @ xs \ ys = [] @ [z] @ zs" by (simp add: ys) then show ?thesis using False by blast qed qed subsection \Sorting\ subsubsection \\<^const>\sorted_wrt\\ text \Sometimes the second equation in the definition of \<^const>\sorted_wrt\ is too aggressive because it relates each list element to \emph{all} its successors. 
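For instance, \<open>sorted_wrt (<) [x, y, z]\<close> unfolds to the three conditions \<open>x < y\<close>,
\<open>x < z\<close> and \<open>y < z\<close>, even though \<open>x < z\<close> already follows from the other two by
transitivity. A small sanity check makes this unfolding explicit:\<close>

lemma "sorted_wrt (<) [x, y, z] = (x < y \<and> x < z \<and> y < z)"
  by simp

text \<open>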
Then this equation should be removed and \sorted_wrt2_simps\ should be added instead.\ lemma sorted_wrt1: "sorted_wrt P [x] = True" by(simp) lemma sorted_wrt2: "transp P \ sorted_wrt P (x # y # zs) = (P x y \ sorted_wrt P (y # zs))" proof (induction zs arbitrary: x y) case (Cons z zs) then show ?case by simp (meson transpD)+ qed auto lemmas sorted_wrt2_simps = sorted_wrt1 sorted_wrt2 lemma sorted_wrt_true [simp]: "sorted_wrt (\_ _. True) xs" by (induction xs) simp_all lemma sorted_wrt_append: "sorted_wrt P (xs @ ys) \ sorted_wrt P xs \ sorted_wrt P ys \ (\x\set xs. \y\set ys. P x y)" by (induction xs) auto lemma sorted_wrt_map: "sorted_wrt R (map f xs) = sorted_wrt (\x y. R (f x) (f y)) xs" by (induction xs) simp_all lemma assumes "sorted_wrt f xs" shows sorted_wrt_take: "sorted_wrt f (take n xs)" and sorted_wrt_drop: "sorted_wrt f (drop n xs)" proof - from assms have "sorted_wrt f (take n xs @ drop n xs)" by simp thus "sorted_wrt f (take n xs)" and "sorted_wrt f (drop n xs)" unfolding sorted_wrt_append by simp_all qed lemma sorted_wrt_filter: "sorted_wrt f xs \ sorted_wrt f (filter P xs)" by (induction xs) auto lemma sorted_wrt_rev: "sorted_wrt P (rev xs) = sorted_wrt (\x y. P y x) xs" by (induction xs) (auto simp add: sorted_wrt_append) lemma sorted_wrt_mono_rel: "(\x y. \ x \ set xs; y \ set xs; P x y \ \ Q x y) \ sorted_wrt P xs \ sorted_wrt Q xs" by(induction xs)(auto) lemma sorted_wrt01: "length xs \ 1 \ sorted_wrt P xs" by(auto simp: le_Suc_eq length_Suc_conv) lemma sorted_wrt_iff_nth_less: "sorted_wrt P xs = (\i j. i < j \ j < length xs \ P (xs ! i) (xs ! j))" by (induction xs) (auto simp add: in_set_conv_nth Ball_def nth_Cons split: nat.split) lemma sorted_wrt_nth_less: "\ sorted_wrt P xs; i < j; j < length xs \ \ P (xs ! i) (xs ! j)" by(auto simp: sorted_wrt_iff_nth_less) lemma sorted_wrt_iff_nth_Suc_transp: assumes "transp P" shows "sorted_wrt P xs \ (\i. Suc i < length xs \ P (xs!i) (xs!(Suc i)))" (is "?L = ?R") proof assume ?L thus ?R by (simp add: sorted_wrt_iff_nth_less) next assume ?R have "i < j \ j < length xs \ P (xs ! i) (xs ! j)" for i j by(induct i j rule: less_Suc_induct)(simp add: \?R\, meson assms transpE transp_less) thus ?L by (simp add: sorted_wrt_iff_nth_less) qed lemma sorted_wrt_upt[simp]: "sorted_wrt (<) [m..Each element is greater or equal to its index:\ lemma sorted_wrt_less_idx: "sorted_wrt (<) ns \ i < length ns \ i \ ns!i" proof (induction ns arbitrary: i rule: rev_induct) case Nil thus ?case by simp next case snoc thus ?case by (auto simp: nth_append sorted_wrt_append) (metis less_antisym not_less nth_mem) qed subsubsection \\<^const>\sorted\\ context linorder begin text \Sometimes the second equation in the definition of \<^const>\sorted\ is too aggressive because it relates each list element to \emph{all} its successors. Then this equation should be removed and \sorted2_simps\ should be added instead. Executable code is one such use case.\ lemma sorted0: "sorted [] = True" by simp lemma sorted1: "sorted [x] = True" by simp lemma sorted2: "sorted (x # y # zs) = (x \ y \ sorted (y # zs))" by(induction zs) auto lemmas sorted2_simps = sorted1 sorted2 lemmas [code] = sorted0 sorted2_simps lemma sorted_append: "sorted (xs@ys) = (sorted xs \ sorted ys \ (\x \ set xs. \y \ set ys. x\y))" by (simp add: sorted_wrt_append) lemma sorted_map: "sorted (map f xs) = sorted_wrt (\x y. 
f x \ f y) xs" by (simp add: sorted_wrt_map) lemma sorted01: "length xs \ 1 \ sorted xs" by (simp add: sorted_wrt01) lemma sorted_tl: "sorted xs \ sorted (tl xs)" by (cases xs) (simp_all) lemma sorted_iff_nth_mono_less: "sorted xs = (\i j. i < j \ j < length xs \ xs ! i \ xs ! j)" by (simp add: sorted_wrt_iff_nth_less) lemma sorted_iff_nth_mono: "sorted xs = (\i j. i \ j \ j < length xs \ xs ! i \ xs ! j)" by (auto simp: sorted_iff_nth_mono_less nat_less_le) lemma sorted_nth_mono: "sorted xs \ i \ j \ j < length xs \ xs!i \ xs!j" by (auto simp: sorted_iff_nth_mono) lemma sorted_iff_nth_Suc: "sorted xs \ (\i. Suc i < length xs \ xs!i \ xs!(Suc i))" by(simp add: sorted_wrt_iff_nth_Suc_transp) lemma sorted_rev_nth_mono: "sorted (rev xs) \ i \ j \ j < length xs \ xs!j \ xs!i" using sorted_nth_mono[ of "rev xs" "length xs - j - 1" "length xs - i - 1"] rev_nth[of "length xs - i - 1" "xs"] rev_nth[of "length xs - j - 1" "xs"] by auto lemma sorted_rev_iff_nth_mono: "sorted (rev xs) \ (\ i j. i \ j \ j < length xs \ xs!j \ xs!i)" (is "?L = ?R") proof assume ?L thus ?R by (blast intro: sorted_rev_nth_mono) next assume ?R have "rev xs ! k \ rev xs ! l" if asms: "k \ l" "l < length(rev xs)" for k l proof - have "k < length xs" "l < length xs" "length xs - Suc l \ length xs - Suc k" "length xs - Suc k < length xs" using asms by auto thus "rev xs ! k \ rev xs ! l" using \?R\ \k \ l\ unfolding rev_nth[OF \k < length xs\] rev_nth[OF \l < length xs\] by blast qed thus ?L by (simp add: sorted_iff_nth_mono) qed lemma sorted_rev_iff_nth_Suc: "sorted (rev xs) \ (\i. Suc i < length xs \ xs!(Suc i) \ xs!i)" proof- interpret dual: linorder "(\x y. y \ x)" "(\x y. y < x)" using dual_linorder . show ?thesis using dual_linorder dual.sorted_iff_nth_Suc dual.sorted_iff_nth_mono unfolding sorted_rev_iff_nth_mono by simp qed lemma sorted_map_remove1: "sorted (map f xs) \ sorted (map f (remove1 x xs))" by (induct xs) (auto) lemma sorted_remove1: "sorted xs \ sorted (remove1 a xs)" using sorted_map_remove1 [of "\x. 
x"] by simp lemma sorted_butlast: assumes "xs \ []" and "sorted xs" shows "sorted (butlast xs)" proof - from \xs \ []\ obtain ys y where "xs = ys @ [y]" by (cases xs rule: rev_cases) auto with \sorted xs\ show ?thesis by (simp add: sorted_append) qed lemma sorted_replicate [simp]: "sorted(replicate n x)" by(induction n) (auto) lemma sorted_remdups[simp]: "sorted xs \ sorted (remdups xs)" by (induct xs) (auto) lemma sorted_remdups_adj[simp]: "sorted xs \ sorted (remdups_adj xs)" by (induct xs rule: remdups_adj.induct, simp_all split: if_split_asm) lemma sorted_nths: "sorted xs \ sorted (nths xs I)" by(induction xs arbitrary: I)(auto simp: nths_Cons) lemma sorted_distinct_set_unique: assumes "sorted xs" "distinct xs" "sorted ys" "distinct ys" "set xs = set ys" shows "xs = ys" proof - from assms have 1: "length xs = length ys" by (auto dest!: distinct_card) from assms show ?thesis proof(induct rule:list_induct2[OF 1]) case 1 show ?case by simp next case (2 x xs y ys) then show ?case by (cases \x = y\) (auto simp add: insert_eq_iff) qed qed lemma map_sorted_distinct_set_unique: assumes "inj_on f (set xs \ set ys)" assumes "sorted (map f xs)" "distinct (map f xs)" "sorted (map f ys)" "distinct (map f ys)" assumes "set xs = set ys" shows "xs = ys" proof - from assms have "map f xs = map f ys" by (simp add: sorted_distinct_set_unique) with \inj_on f (set xs \ set ys)\ show "xs = ys" by (blast intro: map_inj_on) qed lemma assumes "sorted xs" shows sorted_take: "sorted (take n xs)" and sorted_drop: "sorted (drop n xs)" proof - from assms have "sorted (take n xs @ drop n xs)" by simp then show "sorted (take n xs)" and "sorted (drop n xs)" unfolding sorted_append by simp_all qed lemma sorted_dropWhile: "sorted xs \ sorted (dropWhile P xs)" by (auto dest: sorted_drop simp add: dropWhile_eq_drop) lemma sorted_takeWhile: "sorted xs \ sorted (takeWhile P xs)" by (subst takeWhile_eq_take) (auto dest: sorted_take) lemma sorted_filter: "sorted (map f xs) \ sorted (map f (filter P xs))" by (induct xs) simp_all lemma foldr_max_sorted: assumes "sorted (rev xs)" shows "foldr max xs y = (if xs = [] then y else max (xs ! 0) y)" using assms proof (induct xs) case (Cons x xs) then have "sorted (rev xs)" using sorted_append by auto with Cons show ?case by (cases xs) (auto simp add: sorted_append max_def) qed simp lemma filter_equals_takeWhile_sorted_rev: assumes sorted: "sorted (rev (map f xs))" shows "filter (\x. t < f x) xs = takeWhile (\ x. t < f x) xs" (is "filter ?P xs = ?tW") proof (rule takeWhile_eq_filter[symmetric]) let "?dW" = "dropWhile ?P xs" fix x assume "x \ set ?dW" then obtain i where i: "i < length ?dW" and nth_i: "x = ?dW ! i" unfolding in_set_conv_nth by auto hence "length ?tW + i < length (?tW @ ?dW)" unfolding length_append by simp hence i': "length (map f ?tW) + i < length (map f xs)" by simp have "(map f ?tW @ map f ?dW) ! (length (map f ?tW) + i) \ (map f ?tW @ map f ?dW) ! (length (map f ?tW) + 0)" using sorted_rev_nth_mono[OF sorted _ i', of "length ?tW"] unfolding map_append[symmetric] by simp hence "f x \ f (?dW ! 0)" unfolding nth_append_length_plus nth_i using i preorder_class.le_less_trans[OF le0 i] by simp also have "... \ t" using hd_dropWhile[of "?P" xs] le0[THEN preorder_class.le_less_trans, OF i] using hd_conv_nth[of "?dW"] by simp finally show "\ t < f x" by simp qed lemma sorted_map_same: "sorted (map f (filter (\x. f x = g xs) xs))" proof (induct xs arbitrary: g) case Nil then show ?case by simp next case (Cons x xs) then have "sorted (map f (filter (\y. f y = (\xs. 
f x) xs) xs))" . moreover from Cons have "sorted (map f (filter (\y. f y = (g \ Cons x) xs) xs))" . ultimately show ?case by simp_all qed lemma sorted_same: "sorted (filter (\x. x = g xs) xs)" using sorted_map_same [of "\x. x"] by simp end lemma sorted_upt[simp]: "sorted [m..Sorting functions\ text\Currently it is not shown that \<^const>\sort\ returns a permutation of its input because the nicest proof is via multisets, which are not part of Main. Alternatively one could define a function that counts the number of occurrences of an element in a list and use that instead of multisets to state the correctness property.\ context linorder begin lemma set_insort_key: "set (insort_key f x xs) = insert x (set xs)" by (induct xs) auto lemma length_insort [simp]: "length (insort_key f x xs) = Suc (length xs)" by (induct xs) simp_all lemma insort_key_left_comm: assumes "f x \ f y" shows "insort_key f y (insort_key f x xs) = insort_key f x (insort_key f y xs)" by (induct xs) (auto simp add: assms dest: order.antisym) lemma insort_left_comm: "insort x (insort y xs) = insort y (insort x xs)" by (cases "x = y") (auto intro: insort_key_left_comm) lemma comp_fun_commute_insort: "comp_fun_commute insort" proof qed (simp add: insort_left_comm fun_eq_iff) lemma sort_key_simps [simp]: "sort_key f [] = []" "sort_key f (x#xs) = insort_key f x (sort_key f xs)" by (simp_all add: sort_key_def) lemma sort_key_conv_fold: assumes "inj_on f (set xs)" shows "sort_key f xs = fold (insort_key f) xs []" proof - have "fold (insort_key f) (rev xs) = fold (insort_key f) xs" proof (rule fold_rev, rule ext) fix zs fix x y assume "x \ set xs" "y \ set xs" with assms have *: "f y = f x \ y = x" by (auto dest: inj_onD) have **: "x = y \ y = x" by auto show "(insort_key f y \ insort_key f x) zs = (insort_key f x \ insort_key f y) zs" by (induct zs) (auto intro: * simp add: **) qed then show ?thesis by (simp add: sort_key_def foldr_conv_fold) qed lemma sort_conv_fold: "sort xs = fold insort xs []" by (rule sort_key_conv_fold) simp lemma length_sort[simp]: "length (sort_key f xs) = length xs" by (induct xs, auto) lemma set_sort[simp]: "set(sort_key f xs) = set xs" by (induct xs) (simp_all add: set_insort_key) lemma distinct_insort: "distinct (insort_key f x xs) = (x \ set xs \ distinct xs)" by(induct xs)(auto simp: set_insort_key) lemma distinct_insort_key: "distinct (map f (insort_key f x xs)) = (f x \ f ` set xs \ (distinct (map f xs)))" by (induct xs) (auto simp: set_insort_key) lemma distinct_sort[simp]: "distinct (sort_key f xs) = distinct xs" by (induct xs) (simp_all add: distinct_insort) lemma sorted_insort_key: "sorted (map f (insort_key f x xs)) = sorted (map f xs)" by (induct xs) (auto simp: set_insort_key) lemma sorted_insort: "sorted (insort x xs) = sorted xs" using sorted_insort_key [where f="\x. x"] by simp theorem sorted_sort_key [simp]: "sorted (map f (sort_key f xs))" by (induct xs) (auto simp:sorted_insort_key) theorem sorted_sort [simp]: "sorted (sort xs)" using sorted_sort_key [where f="\x. x"] by simp lemma insort_not_Nil [simp]: "insort_key f a xs \ []" by (induction xs) simp_all lemma insort_is_Cons: "\x\set xs. f a \ f x \ insort_key f a xs = a # xs" by (cases xs) auto lemma sorted_sort_id: "sorted xs \ sort xs = xs" by (induct xs) (auto simp add: insort_is_Cons) lemma insort_key_remove1: assumes "a \ set xs" and "sorted (map f xs)" and "hd (filter (\x. 
f a = f x) xs) = a" shows "insort_key f a (remove1 a xs) = xs" using assms proof (induct xs) case (Cons x xs) then show ?case proof (cases "x = a") case False then have "f x \ f a" using Cons.prems by auto then have "f x < f a" using Cons.prems by auto with \f x \ f a\ show ?thesis using Cons by (auto simp: insort_is_Cons) qed (auto simp: insort_is_Cons) qed simp lemma insort_remove1: assumes "a \ set xs" and "sorted xs" shows "insort a (remove1 a xs) = xs" proof (rule insort_key_remove1) define n where "n = length (filter ((=) a) xs) - 1" from \a \ set xs\ show "a \ set xs" . from \sorted xs\ show "sorted (map (\x. x) xs)" by simp from \a \ set xs\ have "a \ set (filter ((=) a) xs)" by auto then have "set (filter ((=) a) xs) \ {}" by auto then have "filter ((=) a) xs \ []" by (auto simp only: set_empty) then have "length (filter ((=) a) xs) > 0" by simp then have n: "Suc n = length (filter ((=) a) xs)" by (simp add: n_def) moreover have "replicate (Suc n) a = a # replicate n a" by simp ultimately show "hd (filter ((=) a) xs) = a" by (simp add: replicate_length_filter) qed lemma finite_sorted_distinct_unique: assumes "finite A" shows "\!xs. set xs = A \ sorted xs \ distinct xs" proof - obtain xs where "distinct xs" "A = set xs" using finite_distinct_list [OF assms] by metis then show ?thesis by (rule_tac a="sort xs" in ex1I) (auto simp: sorted_distinct_set_unique) qed lemma insort_insert_key_triv: "f x \ f ` set xs \ insort_insert_key f x xs = xs" by (simp add: insort_insert_key_def) lemma insort_insert_triv: "x \ set xs \ insort_insert x xs = xs" using insort_insert_key_triv [of "\x. x"] by simp lemma insort_insert_insort_key: "f x \ f ` set xs \ insort_insert_key f x xs = insort_key f x xs" by (simp add: insort_insert_key_def) lemma insort_insert_insort: "x \ set xs \ insort_insert x xs = insort x xs" using insort_insert_insort_key [of "\x. x"] by simp lemma set_insort_insert: "set (insort_insert x xs) = insert x (set xs)" by (auto simp add: insort_insert_key_def set_insort_key) lemma distinct_insort_insert: assumes "distinct xs" shows "distinct (insort_insert_key f x xs)" using assms by (induct xs) (auto simp add: insort_insert_key_def set_insort_key) lemma sorted_insort_insert_key: assumes "sorted (map f xs)" shows "sorted (map f (insort_insert_key f x xs))" using assms by (simp add: insort_insert_key_def sorted_insort_key) lemma sorted_insort_insert: assumes "sorted xs" shows "sorted (insort_insert x xs)" using assms sorted_insort_insert_key [of "\x. x"] by simp lemma filter_insort_triv: "\ P x \ filter P (insort_key f x xs) = filter P xs" by (induct xs) simp_all lemma filter_insort: "sorted (map f xs) \ P x \ filter P (insort_key f x xs) = insort_key f x (filter P xs)" by (induct xs) (auto, subst insort_is_Cons, auto) lemma filter_sort: "filter P (sort_key f xs) = sort_key f (filter P xs)" by (induct xs) (simp_all add: filter_insort_triv filter_insort) lemma remove1_insort_key [simp]: "remove1 x (insort_key f x xs) = xs" by (induct xs) simp_all end lemma sort_upt [simp]: "sort [m.. \x \ set xs. P x \ List.find P xs = Some (Min {x\set xs. P x})" proof (induct xs) case Nil then show ?case by simp next case (Cons x xs) show ?case proof (cases "P x") case True with Cons show ?thesis by (auto intro: Min_eqI [symmetric]) next case False then have "{y. (y = x \ y \ set xs) \ P y} = {y \ set xs. 
P y}" by auto with Cons False show ?thesis by (simp_all) qed qed lemma sorted_enumerate [simp]: "sorted (map fst (enumerate n xs))" by (simp add: enumerate_eq_zip) text \Stability of \<^const>\sort_key\:\ lemma sort_key_stable: "filter (\y. f y = k) (sort_key f xs) = filter (\y. f y = k) xs" by (induction xs) (auto simp: filter_insort insort_is_Cons filter_insort_triv) corollary stable_sort_key_sort_key: "stable_sort_key sort_key" by(simp add: stable_sort_key_def sort_key_stable) lemma sort_key_const: "sort_key (\x. c) xs = xs" by (metis (mono_tags) filter_True sort_key_stable) subsubsection \\<^const>\transpose\ on sorted lists\ lemma sorted_transpose[simp]: "sorted (rev (map length (transpose xs)))" by (auto simp: sorted_iff_nth_mono rev_nth nth_transpose length_filter_conv_card intro: card_mono) lemma transpose_max_length: "foldr (\xs. max (length xs)) (transpose xs) 0 = length (filter (\x. x \ []) xs)" (is "?L = ?R") proof (cases "transpose xs = []") case False have "?L = foldr max (map length (transpose xs)) 0" by (simp add: foldr_map comp_def) also have "... = length (transpose xs ! 0)" using False sorted_transpose by (simp add: foldr_max_sorted) finally show ?thesis using False by (simp add: nth_transpose) next case True hence "filter (\x. x \ []) xs = []" by (auto intro!: filter_False simp: transpose_empty) thus ?thesis by (simp add: transpose_empty True) qed lemma length_transpose_sorted: fixes xs :: "'a list list" assumes sorted: "sorted (rev (map length xs))" shows "length (transpose xs) = (if xs = [] then 0 else length (xs ! 0))" proof (cases "xs = []") case False thus ?thesis using foldr_max_sorted[OF sorted] False unfolding length_transpose foldr_map comp_def by simp qed simp lemma nth_nth_transpose_sorted[simp]: fixes xs :: "'a list list" assumes sorted: "sorted (rev (map length xs))" and i: "i < length (transpose xs)" and j: "j < length (filter (\ys. i < length ys) xs)" shows "transpose xs ! i ! j = xs ! j ! i" using j filter_equals_takeWhile_sorted_rev[OF sorted, of i] nth_transpose[OF i] nth_map[OF j] by (simp add: takeWhile_nth) lemma transpose_column_length: fixes xs :: "'a list list" assumes sorted: "sorted (rev (map length xs))" and "i < length xs" shows "length (filter (\ys. i < length ys) (transpose xs)) = length (xs ! i)" proof - have "xs \ []" using \i < length xs\ by auto note filter_equals_takeWhile_sorted_rev[OF sorted, simp] { fix j assume "j \ i" note sorted_rev_nth_mono[OF sorted, of j i, simplified, OF this \i < length xs\] } note sortedE = this[consumes 1] have "{j. j < length (transpose xs) \ i < length (transpose xs ! j)} = {..< length (xs ! i)}" proof safe fix j assume "j < length (transpose xs)" and "i < length (transpose xs ! j)" with this(2) nth_transpose[OF this(1)] have "i < length (takeWhile (\ys. j < length ys) xs)" by simp from nth_mem[OF this] takeWhile_nth[OF this] show "j < length (xs ! i)" by (auto dest: set_takeWhileD) next fix j assume "j < length (xs ! i)" thus "j < length (transpose xs)" using foldr_max_sorted[OF sorted] \xs \ []\ sortedE[OF le0] by (auto simp: length_transpose comp_def foldr_map) have "Suc i \ length (takeWhile (\ys. j < length ys) xs)" using \i < length xs\ \j < length (xs ! i)\ less_Suc_eq_le by (auto intro!: length_takeWhile_less_P_nth dest!: sortedE) with nth_transpose[OF \j < length (transpose xs)\] show "i < length (transpose xs ! 
j)" by simp qed thus ?thesis by (simp add: length_filter_conv_card) qed lemma transpose_column: fixes xs :: "'a list list" assumes sorted: "sorted (rev (map length xs))" and "i < length xs" shows "map (\ys. ys ! i) (filter (\ys. i < length ys) (transpose xs)) = xs ! i" (is "?R = _") proof (rule nth_equalityI) show length: "length ?R = length (xs ! i)" using transpose_column_length[OF assms] by simp fix j assume j: "j < length ?R" note * = less_le_trans[OF this, unfolded length_map, OF length_filter_le] from j have j_less: "j < length (xs ! i)" using length by simp have i_less_tW: "Suc i \ length (takeWhile (\ys. Suc j \ length ys) xs)" proof (rule length_takeWhile_less_P_nth) show "Suc i \ length xs" using \i < length xs\ by simp fix k assume "k < Suc i" hence "k \ i" by auto with sorted_rev_nth_mono[OF sorted this] \i < length xs\ have "length (xs ! i) \ length (xs ! k)" by simp thus "Suc j \ length (xs ! k)" using j_less by simp qed have i_less_filter: "i < length (filter (\ys. j < length ys) xs) " unfolding filter_equals_takeWhile_sorted_rev[OF sorted, of j] using i_less_tW by (simp_all add: Suc_le_eq) from j show "?R ! j = xs ! i ! j" unfolding filter_equals_takeWhile_sorted_rev[OF sorted_transpose, of i] by (simp add: takeWhile_nth nth_nth_transpose_sorted[OF sorted * i_less_filter]) qed lemma transpose_transpose: fixes xs :: "'a list list" assumes sorted: "sorted (rev (map length xs))" shows "transpose (transpose xs) = takeWhile (\x. x \ []) xs" (is "?L = ?R") proof - have len: "length ?L = length ?R" unfolding length_transpose transpose_max_length using filter_equals_takeWhile_sorted_rev[OF sorted, of 0] by simp { fix i assume "i < length ?R" with less_le_trans[OF _ length_takeWhile_le[of _ xs]] have "i < length xs" by simp } note * = this show ?thesis by (rule nth_equalityI) (simp_all add: len nth_transpose transpose_column[OF sorted] * takeWhile_nth) qed theorem transpose_rectangle: assumes "xs = [] \ n = 0" assumes rect: "\ i. i < length xs \ length (xs ! i) = n" shows "transpose xs = map (\ i. map (\ j. xs ! j ! i) [0..ys. i < length ys) xs = xs" using rect by (auto simp: in_set_conv_nth intro!: filter_True) } ultimately show "\i. i < length (transpose xs) \ ?trans ! i = ?map ! i" by (auto simp: nth_transpose intro: nth_equalityI) qed subsubsection \\sorted_key_list_of_set\\ text\ This function maps (finite) linearly ordered sets to sorted lists. The linear order is obtained by a key function that maps the elements of the set to a type that is linearly ordered. Warning: in most cases it is not a good idea to convert from sets to lists but one should convert in the other direction (via \<^const>\set\). Note: this is a generalisation of the older \sorted_list_of_set\ that is obtained by setting the key function to the identity. Consequently, new theorems should be added to the locale below. They should also be aliased to more convenient names for use with \sorted_list_of_set\ as seen further below. 
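For instance, with the identity as key function, the finite set \<open>{2::nat, 1, 3}\<close>
is mapped to the sorted list \<open>[1, 2, 3]\<close>.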
\ definition (in linorder) sorted_key_list_of_set :: "('b \ 'a) \ 'b set \ 'b list" where "sorted_key_list_of_set f \ folding_on.F (insort_key f) []" locale folding_insort_key = lo?: linorder "less_eq :: 'a \ 'a \ bool" less for less_eq (infix "\" 50) and less (infix "\" 50) + fixes S fixes f :: "'b \ 'a" assumes inj_on: "inj_on f S" begin lemma insort_key_commute: "x \ S \ y \ S \ insort_key f y o insort_key f x = insort_key f x o insort_key f y" proof(rule ext, goal_cases) case (1 xs) with inj_on show ?case by (induction xs) (auto simp: inj_onD) qed sublocale fold_insort_key: folding_on S "insort_key f" "[]" rewrites "folding_on.F (insort_key f) [] = sorted_key_list_of_set f" proof - show "folding_on S (insort_key f)" by standard (simp add: insort_key_commute) qed (simp add: sorted_key_list_of_set_def) lemma idem_if_sorted_distinct: assumes "set xs \ S" and "sorted (map f xs)" "distinct xs" shows "sorted_key_list_of_set f (set xs) = xs" proof(cases "S = {}") case True then show ?thesis using \set xs \ S\ by auto next case False with assms show ?thesis proof(induction xs) case (Cons a xs) with Cons show ?case by (cases xs) auto qed simp qed lemma sorted_key_list_of_set_empty: "sorted_key_list_of_set f {} = []" by (fact fold_insort_key.empty) lemma sorted_key_list_of_set_insert: assumes "insert x A \ S" and "finite A" "x \ A" shows "sorted_key_list_of_set f (insert x A) = insort_key f x (sorted_key_list_of_set f A)" using assms by (fact fold_insort_key.insert) lemma sorted_key_list_of_set_insert_remove [simp]: assumes "insert x A \ S" and "finite A" shows "sorted_key_list_of_set f (insert x A) = insort_key f x (sorted_key_list_of_set f (A - {x}))" using assms by (fact fold_insort_key.insert_remove) lemma sorted_key_list_of_set_eq_Nil_iff [simp]: assumes "A \ S" and "finite A" shows "sorted_key_list_of_set f A = [] \ A = {}" using assms by (auto simp: fold_insort_key.remove) lemma set_sorted_key_list_of_set [simp]: assumes "A \ S" and "finite A" shows "set (sorted_key_list_of_set f A) = A" using assms(2,1) by (induct A rule: finite_induct) (simp_all add: set_insort_key) lemma sorted_sorted_key_list_of_set [simp]: assumes "A \ S" shows "sorted (map f (sorted_key_list_of_set f A))" proof (cases "finite A") case True thus ?thesis using \A \ S\ by (induction A) (simp_all add: sorted_insort_key) next case False thus ?thesis by simp qed lemma distinct_if_distinct_map: "distinct (map f xs) \ distinct xs" using inj_on by (simp add: distinct_map) lemma distinct_sorted_key_list_of_set [simp]: assumes "A \ S" shows "distinct (map f (sorted_key_list_of_set f A))" proof (cases "finite A") case True thus ?thesis using \A \ S\ inj_on by (induction A) (force simp: distinct_insort_key dest: inj_onD)+ next case False thus ?thesis by simp qed lemma length_sorted_key_list_of_set [simp]: assumes "A \ S" shows "length (sorted_key_list_of_set f A) = card A" proof (cases "finite A") case True with assms inj_on show ?thesis using distinct_card[symmetric, OF distinct_sorted_key_list_of_set] by (auto simp: subset_inj_on intro!: card_image) qed auto lemmas sorted_key_list_of_set = set_sorted_key_list_of_set sorted_sorted_key_list_of_set distinct_sorted_key_list_of_set lemma sorted_key_list_of_set_remove: assumes "insert x A \ S" and "finite A" shows "sorted_key_list_of_set f (A - {x}) = remove1 x (sorted_key_list_of_set f A)" proof (cases "x \ A") case False with assms have "x \ set (sorted_key_list_of_set f A)" by simp with False show ?thesis by (simp add: remove1_idem) next case True then obtain B where A: "A = 
insert x B" by (rule Set.set_insert) with assms show ?thesis by simp qed lemma strict_sorted_key_list_of_set [simp]: "A \ S \ sorted_wrt (\) (map f (sorted_key_list_of_set f A))" by (cases "finite A") (auto simp: strict_sorted_iff subset_inj_on[OF inj_on]) lemma finite_set_strict_sorted: assumes "A \ S" and "finite A" obtains l where "sorted_wrt (\) (map f l)" "set l = A" "length l = card A" using assms by (meson length_sorted_key_list_of_set set_sorted_key_list_of_set strict_sorted_key_list_of_set) lemma (in linorder) strict_sorted_equal: assumes "sorted_wrt (<) xs" and "sorted_wrt (<) ys" and "set ys = set xs" shows "ys = xs" using assms proof (induction xs arbitrary: ys) case (Cons x xs) show ?case proof (cases ys) case Nil then show ?thesis using Cons.prems by auto next case (Cons y ys') then have "xs = ys'" by (metis Cons.prems list.inject sorted_distinct_set_unique strict_sorted_iff) moreover have "x = y" using Cons.prems \xs = ys'\ local.Cons by fastforce ultimately show ?thesis using local.Cons by blast qed qed auto lemma (in linorder) strict_sorted_equal_Uniq: "\\<^sub>\\<^sub>1xs. sorted_wrt (<) xs \ set xs = A" by (simp add: Uniq_def strict_sorted_equal) lemma sorted_key_list_of_set_inject: assumes "A \ S" "B \ S" assumes "sorted_key_list_of_set f A = sorted_key_list_of_set f B" "finite A" "finite B" shows "A = B" using assms set_sorted_key_list_of_set by metis lemma sorted_key_list_of_set_unique: assumes "A \ S" and "finite A" shows "sorted_wrt (\) (map f l) \ set l = A \ length l = card A \ sorted_key_list_of_set f A = l" using assms by (auto simp: strict_sorted_iff card_distinct idem_if_sorted_distinct) end context linorder begin definition "sorted_list_of_set \ sorted_key_list_of_set (\x::'a. x)" text \ We abuse the \rewrites\ functionality of locales to remove trivial assumptions that result from instantiating the key function to the identity. \ sublocale sorted_list_of_set: folding_insort_key "(\)" "(<)" UNIV "(\x. x)" rewrites "sorted_key_list_of_set (\x. x) = sorted_list_of_set" and "\xs. map (\x. x) xs \ xs" and "\X. (X \ UNIV) \ True" and "\x. x \ UNIV \ True" and "\P. (True \ P) \ Trueprop P" and "\P Q. (True \ PROP P \ PROP Q) \ (PROP P \ True \ PROP Q)" proof - show "folding_insort_key (\) (<) UNIV (\x. 
x)" by standard simp qed (simp_all add: sorted_list_of_set_def) text \Alias theorems for backwards compatibility and ease of use.\ lemmas sorted_list_of_set = sorted_list_of_set.sorted_key_list_of_set and sorted_list_of_set_empty = sorted_list_of_set.sorted_key_list_of_set_empty and sorted_list_of_set_insert = sorted_list_of_set.sorted_key_list_of_set_insert and sorted_list_of_set_insert_remove = sorted_list_of_set.sorted_key_list_of_set_insert_remove and sorted_list_of_set_eq_Nil_iff = sorted_list_of_set.sorted_key_list_of_set_eq_Nil_iff and set_sorted_list_of_set = sorted_list_of_set.set_sorted_key_list_of_set and sorted_sorted_list_of_set = sorted_list_of_set.sorted_sorted_key_list_of_set and distinct_sorted_list_of_set = sorted_list_of_set.distinct_sorted_key_list_of_set and length_sorted_list_of_set = sorted_list_of_set.length_sorted_key_list_of_set and sorted_list_of_set_remove = sorted_list_of_set.sorted_key_list_of_set_remove and strict_sorted_list_of_set = sorted_list_of_set.strict_sorted_key_list_of_set and sorted_list_of_set_inject = sorted_list_of_set.sorted_key_list_of_set_inject and sorted_list_of_set_unique = sorted_list_of_set.sorted_key_list_of_set_unique and finite_set_strict_sorted = sorted_list_of_set.finite_set_strict_sorted lemma sorted_list_of_set_sort_remdups [code]: "sorted_list_of_set (set xs) = sort (remdups xs)" proof - interpret comp_fun_commute insort by (fact comp_fun_commute_insort) show ?thesis by (simp add: sorted_list_of_set.fold_insort_key.eq_fold sort_conv_fold fold_set_fold_remdups) qed end lemma sorted_list_of_set_range [simp]: "sorted_list_of_set {m.. {}" shows "sorted_list_of_set A = Min A # sorted_list_of_set (A - {Min A})" using assms by (auto simp: less_le simp flip: sorted_list_of_set.sorted_key_list_of_set_unique intro: Min_in) lemma sorted_list_of_set_greaterThanLessThan: assumes "Suc i < j" shows "sorted_list_of_set {i<.. j" shows "sorted_list_of_set {i<..j} = Suc i # sorted_list_of_set {Suc i<..j}" using sorted_list_of_set_greaterThanLessThan [of i "Suc j"] by (metis assms greaterThanAtMost_def greaterThanLessThan_eq le_imp_less_Suc lessThan_Suc_atMost) lemma nth_sorted_list_of_set_greaterThanLessThan: "n < j - Suc i \ sorted_list_of_set {i<.. sorted_list_of_set {i<..j} ! 
n = Suc (i+n)" using nth_sorted_list_of_set_greaterThanLessThan [of n "Suc j" i] by (simp add: greaterThanAtMost_def greaterThanLessThan_eq lessThan_Suc_atMost) subsubsection \\lists\: the list-forming operator over sets\ inductive_set lists :: "'a set => 'a list set" for A :: "'a set" where Nil [intro!, simp]: "[] \ lists A" | Cons [intro!, simp]: "\a \ A; l \ lists A\ \ a#l \ lists A" inductive_cases listsE [elim!]: "x#l \ lists A" inductive_cases listspE [elim!]: "listsp A (x # l)" inductive_simps listsp_simps[code]: "listsp A []" "listsp A (x # xs)" lemma listsp_mono [mono]: "A \ B \ listsp A \ listsp B" by (rule predicate1I, erule listsp.induct, blast+) lemmas lists_mono = listsp_mono [to_set] lemma listsp_infI: assumes l: "listsp A l" shows "listsp B l \ listsp (inf A B) l" using l by induct blast+ lemmas lists_IntI = listsp_infI [to_set] lemma listsp_inf_eq [simp]: "listsp (inf A B) = inf (listsp A) (listsp B)" proof (rule mono_inf [where f=listsp, THEN order_antisym]) show "mono listsp" by (simp add: mono_def listsp_mono) show "inf (listsp A) (listsp B) \ listsp (inf A B)" by (blast intro!: listsp_infI) qed lemmas listsp_conj_eq [simp] = listsp_inf_eq [simplified inf_fun_def inf_bool_def] lemmas lists_Int_eq [simp] = listsp_inf_eq [to_set] lemma Cons_in_lists_iff[simp]: "x#xs \ lists A \ x \ A \ xs \ lists A" by auto lemma append_in_listsp_conv [iff]: "(listsp A (xs @ ys)) = (listsp A xs \ listsp A ys)" by (induct xs) auto lemmas append_in_lists_conv [iff] = append_in_listsp_conv [to_set] lemma in_listsp_conv_set: "(listsp A xs) = (\x \ set xs. A x)" \ \eliminate \listsp\ in favour of \set\\ by (induct xs) auto lemmas in_lists_conv_set [code_unfold] = in_listsp_conv_set [to_set] lemma in_listspD [dest!]: "listsp A xs \ \x\set xs. A x" by (rule in_listsp_conv_set [THEN iffD1]) lemmas in_listsD [dest!] = in_listspD [to_set] lemma in_listspI [intro!]: "\x\set xs. A x \ listsp A xs" by (rule in_listsp_conv_set [THEN iffD2]) lemmas in_listsI [intro!] = in_listspI [to_set] lemma lists_eq_set: "lists A = {xs. set xs \ A}" by auto lemma lists_empty [simp]: "lists {} = {[]}" by auto lemma lists_UNIV [simp]: "lists UNIV = UNIV" by auto lemma lists_image: "lists (f`A) = map f ` lists A" proof - { fix xs have "\x\set xs. x \ f ` A \ xs \ map f ` lists A" by (induct xs) (auto simp del: list.map simp add: list.map[symmetric] intro!: imageI) } then show ?thesis by auto qed subsubsection \Inductive definition for membership\ inductive ListMem :: "'a \ 'a list \ bool" where elem: "ListMem x (x # xs)" | insert: "ListMem x xs \ ListMem x (y # xs)" lemma ListMem_iff: "(ListMem x xs) = (x \ set xs)" proof show "ListMem x xs \ x \ set xs" by (induct set: ListMem) auto show "x \ set xs \ ListMem x xs" by (induct xs) (auto intro: ListMem.intros) qed subsubsection \Lists as Cartesian products\ text\\set_Cons A Xs\: the set of lists with head drawn from \<^term>\A\ and tail drawn from \<^term>\Xs\.\ definition set_Cons :: "'a set \ 'a list set \ 'a list set" where "set_Cons A XS = {z. \x xs. z = x # xs \ x \ A \ xs \ XS}" lemma set_Cons_sing_Nil [simp]: "set_Cons A {[]} = (%x. 
[x])`A"
  by (auto simp add: set_Cons_def)

text\<open>Yields the set of lists, all of the same length as the argument and
with elements drawn from the corresponding element of the argument.\<close>

primrec listset :: "'a set list \<Rightarrow> 'a list set" where
"listset [] = {[]}" |
"listset (A # As) = set_Cons A (listset As)"


subsection \<open>Relations on Lists\<close>

subsubsection \<open>Length Lexicographic Ordering\<close>

text\<open>These orderings preserve well-foundedness: shorter lists
precede longer lists. These orderings are not used in dictionaries.\<close>

primrec \<comment> \<open>The lexicographic ordering for lists of the specified length\<close>
  lexn :: "('a \<times> 'a) set \<Rightarrow> nat \<Rightarrow> ('a list \<times> 'a list) set" where
    "lexn r 0 = {}" |
    "lexn r (Suc n) =
      (map_prod (%(x, xs). x#xs) (%(x, xs). x#xs) ` (r <*lex*> lexn r n)) Int
      {(xs, ys). length xs = Suc n \<and> length ys = Suc n}"

definition lex :: "('a \<times> 'a) set \<Rightarrow> ('a list \<times> 'a list) set" where
  "lex r = (\<Union>n. lexn r n)" \<comment> \<open>Holds only between lists of the same length\<close>

definition lenlex :: "('a \<times> 'a) set => ('a list \<times> 'a list) set" where
  "lenlex r = inv_image (less_than <*lex*> lex r) (\<lambda>xs. (length xs, xs))"
  \<comment> \<open>Compares lists by their length and then lexicographically\<close>

lemma wf_lexn: assumes "wf r" shows "wf (lexn r n)"
proof (induct n)
  case (Suc n)
  have inj: "inj (\<lambda>(x, xs). x # xs)"
    using assms by (auto simp: inj_on_def)
  have wf: "wf (map_prod (\<lambda>(x, xs). x # xs) (\<lambda>(x, xs). x # xs) ` (r <*lex*> lexn r n))"
    by (simp add: Suc.hyps assms wf_lex_prod wf_map_prod_image [OF _ inj])
  then show ?case
    by (rule wf_subset) auto
qed auto

lemma lexn_length: "(xs, ys) \<in> lexn r n \<Longrightarrow> length xs = n \<and> length ys = n"
  by (induct n arbitrary: xs ys) auto

lemma wf_lex [intro!]:
  assumes "wf r" shows "wf (lex r)"
  unfolding lex_def
proof (rule wf_UN)
  show "wf (lexn r i)" for i
    by (simp add: assms wf_lexn)
  show "\<And>i j. lexn r i \<noteq> lexn r j \<Longrightarrow> Domain (lexn r i) \<inter> Range (lexn r j) = {}"
    by (metis DomainE Int_emptyI RangeE lexn_length)
qed

lemma lexn_conv:
  "lexn r n =
    {(xs,ys). length xs = n \<and> length ys = n \<and>
    (\<exists>xys x y xs' ys'. xs= xys @ x#xs' \<and> ys= xys @ y # ys' \<and> (x, y) \<in> r)}"
proof (induction n)
  case (Suc n)
  then show ?case
    apply (simp add: image_Collect lex_prod_def, safe, blast)
     apply (rule_tac x = "ab # xys" in exI, simp)
    apply (case_tac xys; force)
    done
qed auto

text\<open>By Mathias Fleury:\<close>
proposition lexn_transI:
  assumes "trans r" shows "trans (lexn r n)"
  unfolding trans_def
proof (intro allI impI)
  fix as bs cs
  assume asbs: "(as, bs) \<in> lexn r n" and bscs: "(bs, cs) \<in> lexn r n"
  obtain abs a b as' bs' where n: "length as = n" and "length bs = n" and
    as: "as = abs @ a # as'" and bs: "bs = abs @ b # bs'" and abr: "(a, b) \<in> r"
    using asbs unfolding lexn_conv by blast
  obtain bcs b' c' cs' bs' where n': "length cs = n" and "length bs = n" and
    bs': "bs = bcs @ b' # bs'" and cs: "cs = bcs @ c' # cs'" and b'c'r: "(b', c') \<in> r"
    using bscs unfolding lexn_conv by blast
  consider (le) "length bcs < length abs"
    | (eq) "length bcs = length abs"
    | (ge) "length bcs > length abs" by linarith
  thus "(as, cs) \<in> lexn r n"
  proof cases
    let ?k = "length bcs"
    case le
    hence "as ! ?k = bs ! ?k" unfolding as bs by (simp add: nth_append)
    hence "(as ! ?k, cs ! ?k) \<in> r" using b'c'r unfolding bs' cs by auto
    moreover have "length bcs < length as" using le unfolding as by simp
    from id_take_nth_drop[OF this]
    have "as = take ?k as @ as ! ?k # drop (Suc ?k) as" .
    moreover have "length bcs < length cs" unfolding cs by simp
    from id_take_nth_drop[OF this]
    have "cs = take ?k cs @ cs ! ?k # drop (Suc ?k) cs" .
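    (* The two decompositions just obtained, the membership (as ! ?k, cs ! ?k) \<in> r,
       and the equality of the prefixes established next are exactly the ingredients
       of the characterisation lexn_conv used to close this case. *)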
moreover have "take ?k as = take ?k cs" using le arg_cong[OF bs, of "take (length bcs)"] unfolding cs as bs' by auto ultimately show ?thesis using n n' unfolding lexn_conv by auto next let ?k = "length abs" case ge hence "bs ! ?k = cs ! ?k" unfolding bs' cs by (simp add: nth_append) hence "(as ! ?k, cs ! ?k) \ r" using abr unfolding as bs by auto moreover have "length abs < length as" using ge unfolding as by simp from id_take_nth_drop[OF this] have "as = take ?k as @ as ! ?k # drop (Suc ?k) as" . moreover have "length abs < length cs" using n n' unfolding as by simp from id_take_nth_drop[OF this] have "cs = take ?k cs @ cs ! ?k # drop (Suc ?k) cs" . moreover have "take ?k as = take ?k cs" using ge arg_cong[OF bs', of "take (length abs)"] unfolding cs as bs by auto ultimately show ?thesis using n n' unfolding lexn_conv by auto next let ?k = "length abs" case eq hence *: "abs = bcs" "b = b'" using bs bs' by auto hence "(a, c') \ r" using abr b'c'r assms unfolding trans_def by blast with * show ?thesis using n n' unfolding lexn_conv as bs cs by auto qed qed corollary lex_transI: assumes "trans r" shows "trans (lex r)" using lexn_transI [OF assms] by (clarsimp simp add: lex_def trans_def) (metis lexn_length) lemma lex_conv: "lex r = {(xs,ys). length xs = length ys \ (\xys x y xs' ys'. xs = xys @ x # xs' \ ys = xys @ y # ys' \ (x, y) \ r)}" by (force simp add: lex_def lexn_conv) lemma wf_lenlex [intro!]: "wf r \ wf (lenlex r)" by (unfold lenlex_def) blast lemma lenlex_conv: "lenlex r = {(xs,ys). length xs < length ys \ length xs = length ys \ (xs, ys) \ lex r}" by (auto simp add: lenlex_def Id_on_def lex_prod_def inv_image_def) lemma total_lenlex: assumes "total r" shows "total (lenlex r)" proof - have "(xs,ys) \ lexn r (length xs) \ (ys,xs) \ lexn r (length xs)" if "xs \ ys" and len: "length xs = length ys" for xs ys proof - obtain pre x xs' y ys' where "x\y" and xs: "xs = pre @ [x] @ xs'" and ys: "ys = pre @ [y] @ys'" by (meson len \xs \ ys\ same_length_different) then consider "(x,y) \ r" | "(y,x) \ r" by (meson UNIV_I assms total_on_def) then show ?thesis by cases (use len in \(force simp add: lexn_conv xs ys)+\) qed then show ?thesis by (fastforce simp: lenlex_def total_on_def lex_def) qed lemma lenlex_transI [intro]: "trans r \ trans (lenlex r)" unfolding lenlex_def by (meson lex_transI trans_inv_image trans_less_than trans_lex_prod) lemma Nil_notin_lex [iff]: "([], ys) \ lex r" by (simp add: lex_conv) lemma Nil2_notin_lex [iff]: "(xs, []) \ lex r" by (simp add:lex_conv) lemma Cons_in_lex [simp]: "(x # xs, y # ys) \ lex r \ (x, y) \ r \ length xs = length ys \ x = y \ (xs, ys) \ lex r" (is "?lhs = ?rhs") proof assume ?lhs then show ?rhs by (simp add: lex_conv) (metis hd_append list.sel(1) list.sel(3) tl_append2) next assume ?rhs then show ?lhs by (simp add: lex_conv) (blast intro: Cons_eq_appendI) qed lemma Nil_lenlex_iff1 [simp]: "([], ns) \ lenlex r \ ns \ []" and Nil_lenlex_iff2 [simp]: "(ns,[]) \ lenlex r" by (auto simp: lenlex_def) lemma Cons_lenlex_iff: "((m # ms, n # ns) \ lenlex r) \ length ms < length ns \ length ms = length ns \ (m,n) \ r \ (m = n \ (ms,ns) \ lenlex r)" by (auto simp: lenlex_def) lemma lenlex_irreflexive: "(\x. 
(x,x) \<notin> r) \<Longrightarrow> (xs,xs) \<notin> lenlex r"
  by (induction xs) (auto simp add: Cons_lenlex_iff)

lemma lenlex_trans:
  "\<lbrakk>(x,y) \<in> lenlex r; (y,z) \<in> lenlex r; trans r\<rbrakk> \<Longrightarrow> (x,z) \<in> lenlex r"
  by (meson lenlex_transI transD)

lemma lenlex_length: "(ms, ns) \<in> lenlex r \<Longrightarrow> length ms \<le> length ns"
  by (auto simp: lenlex_def)

lemma lex_append_rightI:
  "(xs, ys) \<in> lex r \<Longrightarrow> length vs = length us \<Longrightarrow> (xs @ us, ys @ vs) \<in> lex r"
  by (fastforce simp: lex_def lexn_conv)

lemma lex_append_leftI:
  "(ys, zs) \<in> lex r \<Longrightarrow> (xs @ ys, xs @ zs) \<in> lex r"
  by (induct xs) auto

lemma lex_append_leftD:
  "\<forall>x. (x,x) \<notin> r \<Longrightarrow> (xs @ ys, xs @ zs) \<in> lex r \<Longrightarrow> (ys, zs) \<in> lex r"
  by (induct xs) auto

lemma lex_append_left_iff:
  "\<forall>x. (x,x) \<notin> r \<Longrightarrow> (xs @ ys, xs @ zs) \<in> lex r \<longleftrightarrow> (ys, zs) \<in> lex r"
  by(metis lex_append_leftD lex_append_leftI)

lemma lex_take_index:
  assumes "(xs, ys) \<in> lex r"
  obtains i where "i < length xs" and "i < length ys" and "take i xs = take i ys"
    and "(xs ! i, ys ! i) \<in> r"
proof -
  obtain n us x xs' y ys' where "(xs, ys) \<in> lexn r n" and "length xs = n" and "length ys = n"
    and "xs = us @ x # xs'" and "ys = us @ y # ys'" and "(x, y) \<in> r"
    using assms by (fastforce simp: lex_def lexn_conv)
  then show ?thesis
    by (intro that [of "length us"]) auto
qed

lemma irrefl_lex: "irrefl r \<Longrightarrow> irrefl (lex r)"
  by (meson irrefl_def lex_take_index)

lemma lexl_not_refl [simp]: "irrefl r \<Longrightarrow> (x,x) \<notin> lex r"
  by (meson irrefl_def lex_take_index)


subsubsection \<open>Lexicographic Ordering\<close>

text \<open>Classical lexicographic ordering on lists, i.e. "a" < "ab" < "b".
    This ordering does \emph{not} preserve well-foundedness.
    Author: N. Voelker, March 2005.\<close>

definition lexord :: "('a \<times> 'a) set \<Rightarrow> ('a list \<times> 'a list) set" where
  "lexord r = {(x,y). \<exists> a v. y = x @ a # v \<or>
            (\<exists> u a b v w. (a,b) \<in> r \<and> x = u @ (a # v) \<and> y = u @ (b # w))}"

lemma lexord_Nil_left[simp]: "([],y) \<in> lexord r = (\<exists> a x. y = a # x)"
  by (unfold lexord_def, induct_tac y, auto)

lemma lexord_Nil_right[simp]: "(x,[]) \<notin> lexord r"
  by (unfold lexord_def, induct_tac x, auto)

lemma lexord_cons_cons[simp]:
  "(a # x, b # y) \<in> lexord r \<longleftrightarrow> (a,b)\<in> r \<or> (a = b \<and> (x,y)\<in> lexord r)"
  (is "?lhs = ?rhs")
proof
  assume ?lhs
  then show ?rhs
    apply (simp add: lexord_def)
    apply (metis hd_append list.sel(1) list.sel(3) tl_append2)
    done
qed (auto simp add: lexord_def; (blast | meson Cons_eq_appendI))

lemmas lexord_simps = lexord_Nil_left lexord_Nil_right lexord_cons_cons

lemma lexord_same_pref_iff:
  "(xs @ ys, xs @ zs) \<in> lexord r \<longleftrightarrow> (\<exists>x \<in> set xs. (x,x) \<in> r) \<or> (ys, zs) \<in> lexord r"
  by(induction xs) auto

lemma lexord_same_pref_if_irrefl[simp]:
  "irrefl r \<Longrightarrow> (xs @ ys, xs @ zs) \<in> lexord r \<longleftrightarrow> (ys, zs) \<in> lexord r"
  by (simp add: irrefl_def lexord_same_pref_iff)

lemma lexord_append_rightI: "\<exists> b z. y = b # z \<Longrightarrow> (x, x @ y) \<in> lexord r"
  by (metis append_Nil2 lexord_Nil_left lexord_same_pref_iff)

lemma lexord_append_left_rightI:
  "(a,b) \<in> r \<Longrightarrow> (u @ a # x, u @ b # y) \<in> lexord r"
  by (simp add: lexord_same_pref_iff)

lemma lexord_append_leftI: "(u,v) \<in> lexord r \<Longrightarrow> (x @ u, x @ v) \<in> lexord r"
  by (simp add: lexord_same_pref_iff)

lemma lexord_append_leftD:
  "\<lbrakk>(x @ u, x @ v) \<in> lexord r; (\<forall>a. (a,a) \<notin> r) \<rbrakk> \<Longrightarrow> (u,v) \<in> lexord r"
  by (simp add: lexord_same_pref_iff)

lemma lexord_take_index_conv:
  "((x,y) \<in> lexord r) =
    ((length x < length y \<and> take (length x) y = x) \<or>
     (\<exists>i. i < min(length x)(length y) \<and> take i x = take i y \<and> (x!i,y!i) \<in> r))"
proof -
  have "(\<exists>a v. y = x @ a # v) = (length x < length y \<and> take (length x) y = x)"
    by (metis Cons_nth_drop_Suc append_eq_conv_conj drop_all list.simps(3) not_le)
  moreover
  have "(\<exists>u a b. (a, b) \<in> r \<and> (\<exists>v. x = u @ a # v) \<and> (\<exists>w.
\ \lexord is an extension of the partial ordering List.lex\ lemma lexord_lex: "(x,y) \ lex r = ((x,y) \ lexord r \ length x = length y)" proof (induction x arbitrary: y) case (Cons a x y) then show ?case by (cases y) (force+) qed auto lemma lexord_sufI: assumes "(u,w) \ lexord r" "length w \ length u" shows "(u@v,w@z) \ lexord r" proof- from leD[OF assms(2)] assms(1)[unfolded lexord_take_index_conv[of u w r] min_absorb2[OF assms(2)]] obtain i where "take i u = take i w" and "(u!i,w!i) \ r" and "i < length w" by blast hence "((u@v)!i, (w@z)!i) \ r" unfolding nth_append using less_le_trans[OF \i < length w\ assms(2)] \(u!i,w!i) \ r\ by presburger moreover have "i < min (length (u@v)) (length (w@z))" using assms(2) \i < length w\ by simp moreover have "take i (u@v) = take i (w@z)" using assms(2) \i < length w\ \take i u = take i w\ by simp ultimately show ?thesis using lexord_take_index_conv by blast qed lemma lexord_sufE: assumes "(xs@zs,ys@qs) \ lexord r" "xs \ ys" "length xs = length ys" "length zs = length qs" shows "(xs,ys) \ lexord r" proof- obtain i where "i < length (xs@zs)" and "i < length (ys@qs)" and "take i (xs@zs) = take i (ys@qs)" and "((xs@zs) ! i, (ys@qs) ! i) \ r" using assms(1) lex_take_index[unfolded lexord_lex,of "xs @ zs" "ys @ qs" r] length_append[of xs zs, unfolded assms(3,4), folded length_append[of ys qs]] by blast have "length (take i xs) = length (take i ys)" by (simp add: assms(3)) have "i < length xs" using assms(2,3) le_less_linear take_all[of xs i] take_all[of ys i] \take i (xs @ zs) = take i (ys @ qs)\ append_eq_append_conv take_append by metis hence "(xs ! i, ys ! i) \ r" using \((xs @ zs) ! i, (ys @ qs) ! i) \ r\ assms(3) by (simp add: nth_append) moreover have "take i xs = take i ys" using assms(3) \take i (xs @ zs) = take i (ys @ qs)\ by auto ultimately show ?thesis unfolding lexord_take_index_conv using \i < length xs\ assms(3) by fastforce qed lemma lexord_irreflexive: "\x. (x,x) \ r \ (xs,xs) \ lexord r" by (induct xs) auto text\By Ren\'e Thiemann:\ lemma lexord_partial_trans: "(\x y z.
x \ set xs \ (x,y) \ r \ (y,z) \ r \ (x,z) \ r) \ (xs,ys) \ lexord r \ (ys,zs) \ lexord r \ (xs,zs) \ lexord r" proof (induct xs arbitrary: ys zs) case Nil from Nil(3) show ?case unfolding lexord_def by (cases zs, auto) next case (Cons x xs yys zzs) from Cons(3) obtain y ys where yys: "yys = y # ys" unfolding lexord_def by (cases yys, auto) note Cons = Cons[unfolded yys] from Cons(3) have one: "(x,y) \ r \ x = y \ (xs,ys) \ lexord r" by auto from Cons(4) obtain z zs where zzs: "zzs = z # zs" unfolding lexord_def by (cases zzs, auto) note Cons = Cons[unfolded zzs] from Cons(4) have two: "(y,z) \ r \ y = z \ (ys,zs) \ lexord r" by auto { assume "(xs,ys) \ lexord r" and "(ys,zs) \ lexord r" from Cons(1)[OF _ this] Cons(2) have "(xs,zs) \ lexord r" by auto } note ind1 = this { assume "(x,y) \ r" and "(y,z) \ r" from Cons(2)[OF _ this] have "(x,z) \ r" by auto } note ind2 = this from one two ind1 ind2 have "(x,z) \ r \ x = z \ (xs,zs) \ lexord r" by blast thus ?case unfolding zzs by auto qed lemma lexord_trans: "\ (x, y) \ lexord r; (y, z) \ lexord r; trans r \ \ (x, z) \ lexord r" by(auto simp: trans_def intro:lexord_partial_trans) lemma lexord_transI: "trans r \ trans (lexord r)" by (meson lexord_trans transI) lemma total_lexord: "total r \ total (lexord r)" unfolding total_on_def proof clarsimp fix x y assume "\x y. x \ y \ (x, y) \ r \ (y, x) \ r" and "(x::'a list) \ y" and "(y, x) \ lexord r" then show "(x, y) \ lexord r" proof (induction x arbitrary: y) case Nil then show ?case by (metis lexord_Nil_left list.exhaust) next case (Cons a x y) then show ?case by (cases y) (force+) qed qed corollary lexord_linear: "(\a b. (a,b) \ r \ a = b \ (b,a) \ r) \ (x,y) \ lexord r \ x = y \ (y,x) \ lexord r" using total_lexord by (metis UNIV_I total_on_def) lemma lexord_irrefl: "irrefl R \ irrefl (lexord R)" by (simp add: irrefl_def lexord_irreflexive) lemma lexord_asym: assumes "asym R" shows "asym (lexord R)" proof fix xs ys assume "(xs, ys) \ lexord R" then show "(ys, xs) \ lexord R" proof (induct xs arbitrary: ys) case Nil then show ?case by simp next case (Cons x xs) then obtain z zs where ys: "ys = z # zs" by (cases ys) auto with assms Cons show ?case by (auto elim: asym.cases) qed qed lemma lexord_asymmetric: assumes "asym R" assumes hyp: "(a, b) \ lexord R" shows "(b, a) \ lexord R" proof - from \asym R\ have "asym (lexord R)" by (rule lexord_asym) then show ?thesis by (rule asym.cases) (auto simp add: hyp) qed lemma asym_lex: "asym R \ asym (lex R)" by (meson asym.simps irrefl_lex lexord_asym lexord_lex) lemma asym_lenlex: "asym R \ asym (lenlex R)" by (simp add: lenlex_def asym_inv_image asym_less_than asym_lex asym_lex_prod) lemma lenlex_append1: assumes len: "(us,xs) \ lenlex R" and eq: "length vs = length ys" shows "(us @ vs, xs @ ys) \ lenlex R" using len proof (induction us) case Nil then show ?case by (simp add: lenlex_def eq) next case (Cons u us) with lex_append_rightI show ?case by (fastforce simp add: lenlex_def eq) qed lemma lenlex_append2 [simp]: assumes "irrefl R" shows "(us @ xs, us @ ys) \ lenlex R \ (xs, ys) \ lenlex R" proof (induction us) case Nil then show ?case by (simp add: lenlex_def) next case (Cons u us) with assms show ?case by (auto simp: lenlex_def irrefl_def) qed text \ Predicate version of lexicographic order integrated with Isabelle's order type classes. 
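For example, over \nat\ the predicate \lexordp\ defined below satisfies \lexordp [0, 1] [1]\, because \0 < 1\ decides the first position where the lists differ, while the reflexive companion \lexordp_eq\ additionally relates equal lists.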
Author: Andreas Lochbihler \ context ord begin context notes [[inductive_internals]] begin inductive lexordp :: "'a list \ 'a list \ bool" where Nil: "lexordp [] (y # ys)" | Cons: "x < y \ lexordp (x # xs) (y # ys)" | Cons_eq: "\ \ x < y; \ y < x; lexordp xs ys \ \ lexordp (x # xs) (y # ys)" end lemma lexordp_simps [simp]: "lexordp [] ys = (ys \ [])" "lexordp xs [] = False" "lexordp (x # xs) (y # ys) \ x < y \ \ y < x \ lexordp xs ys" by(subst lexordp.simps, fastforce simp add: neq_Nil_conv)+ inductive lexordp_eq :: "'a list \ 'a list \ bool" where Nil: "lexordp_eq [] ys" | Cons: "x < y \ lexordp_eq (x # xs) (y # ys)" | Cons_eq: "\ \ x < y; \ y < x; lexordp_eq xs ys \ \ lexordp_eq (x # xs) (y # ys)" lemma lexordp_eq_simps [simp]: "lexordp_eq [] ys = True" "lexordp_eq xs [] \ xs = []" "lexordp_eq (x # xs) [] = False" "lexordp_eq (x # xs) (y # ys) \ x < y \ \ y < x \ lexordp_eq xs ys" by(subst lexordp_eq.simps, fastforce)+ lemma lexordp_append_rightI: "ys \ Nil \ lexordp xs (xs @ ys)" by(induct xs)(auto simp add: neq_Nil_conv) lemma lexordp_append_left_rightI: "x < y \ lexordp (us @ x # xs) (us @ y # ys)" by(induct us) auto lemma lexordp_eq_refl: "lexordp_eq xs xs" by(induct xs) simp_all lemma lexordp_append_leftI: "lexordp us vs \ lexordp (xs @ us) (xs @ vs)" by(induct xs) auto lemma lexordp_append_leftD: "\ lexordp (xs @ us) (xs @ vs); \a. \ a < a \ \ lexordp us vs" by(induct xs) auto lemma lexordp_irreflexive: assumes irrefl: "\x. \ x < x" shows "\ lexordp xs xs" proof assume "lexordp xs xs" thus False by(induct xs ys\xs)(simp_all add: irrefl) qed lemma lexordp_into_lexordp_eq: "lexordp xs ys \ lexordp_eq xs ys" by (induction rule: lexordp.induct) simp_all lemma lexordp_eq_pref: "lexordp_eq u (u @ v)" by (metis append_Nil2 lexordp_append_rightI lexordp_eq_refl lexordp_into_lexordp_eq) end declare ord.lexordp_simps [simp, code] declare ord.lexordp_eq_simps [code, simp] context order begin lemma lexordp_antisym: assumes "lexordp xs ys" "lexordp ys xs" shows False using assms by induct auto lemma lexordp_irreflexive': "\ lexordp xs xs" by(rule lexordp_irreflexive) simp end context linorder begin lemma lexordp_cases [consumes 1, case_names Nil Cons Cons_eq, cases pred: lexordp]: assumes "lexordp xs ys" obtains (Nil) y ys' where "xs = []" "ys = y # ys'" | (Cons) x xs' y ys' where "xs = x # xs'" "ys = y # ys'" "x < y" | (Cons_eq) x xs' ys' where "xs = x # xs'" "ys = x # ys'" "lexordp xs' ys'" using assms by cases (fastforce simp add: not_less_iff_gr_or_eq)+ lemma lexordp_induct [consumes 1, case_names Nil Cons Cons_eq, induct pred: lexordp]: assumes major: "lexordp xs ys" and Nil: "\y ys. P [] (y # ys)" and Cons: "\x xs y ys. x < y \ P (x # xs) (y # ys)" and Cons_eq: "\x xs ys. \ lexordp xs ys; P xs ys \ \ P (x # xs) (x # ys)" shows "P xs ys" using major by induct (simp_all add: Nil Cons not_less_iff_gr_or_eq Cons_eq) lemma lexordp_iff: "lexordp xs ys \ (\x vs. ys = xs @ x # vs) \ (\us a b vs ws. a < b \ xs = us @ a # vs \ ys = us @ b # ws)" (is "?lhs = ?rhs") proof assume ?lhs thus ?rhs proof induct case Cons_eq thus ?case by simp (metis append.simps(2)) qed(fastforce intro: disjI2 del: disjCI intro: exI[where x="[]"])+ next assume ?rhs thus ?lhs by(auto intro: lexordp_append_leftI[where us="[]", simplified] lexordp_append_leftI) qed lemma lexordp_conv_lexord: "lexordp xs ys \ (xs, ys) \ lexord {(x, y). 
x < y}" by(simp add: lexordp_iff lexord_def) lemma lexordp_eq_antisym: assumes "lexordp_eq xs ys" "lexordp_eq ys xs" shows "xs = ys" using assms by induct simp_all lemma lexordp_eq_trans: assumes "lexordp_eq xs ys" and "lexordp_eq ys zs" shows "lexordp_eq xs zs" using assms by (induct arbitrary: zs) (case_tac zs; auto)+ lemma lexordp_trans: assumes "lexordp xs ys" "lexordp ys zs" shows "lexordp xs zs" using assms by (induct arbitrary: zs) (case_tac zs; auto)+ lemma lexordp_linear: "lexordp xs ys \ xs = ys \ lexordp ys xs" by(induct xs arbitrary: ys; case_tac ys; fastforce) lemma lexordp_conv_lexordp_eq: "lexordp xs ys \ lexordp_eq xs ys \ \ lexordp_eq ys xs" (is "?lhs \ ?rhs") proof assume ?lhs hence "\ lexordp_eq ys xs" by induct simp_all with \?lhs\ show ?rhs by (simp add: lexordp_into_lexordp_eq) next assume ?rhs hence "lexordp_eq xs ys" "\ lexordp_eq ys xs" by simp_all thus ?lhs by induct simp_all qed lemma lexordp_eq_conv_lexord: "lexordp_eq xs ys \ xs = ys \ lexordp xs ys" by(auto simp add: lexordp_conv_lexordp_eq lexordp_eq_refl dest: lexordp_eq_antisym) lemma lexordp_eq_linear: "lexordp_eq xs ys \ lexordp_eq ys xs" by (induct xs arbitrary: ys) (case_tac ys; auto)+ lemma lexordp_linorder: "class.linorder lexordp_eq lexordp" by unfold_locales (auto simp add: lexordp_conv_lexordp_eq lexordp_eq_refl lexordp_eq_antisym intro: lexordp_eq_trans del: disjCI intro: lexordp_eq_linear) end lemma sorted_insort_is_snoc: "sorted xs \ \x \ set xs. a \ x \ insort a xs = xs @ [a]" by (induct xs) (auto dest!: insort_is_Cons) subsubsection \Lexicographic combination of measure functions\ text \These are useful for termination proofs\ definition "measures fs = inv_image (lex less_than) (%a. map (%f. f a) fs)" lemma wf_measures[simp]: "wf (measures fs)" unfolding measures_def by blast lemma in_measures[simp]: "(x, y) \ measures [] = False" "(x, y) \ measures (f # fs) = (f x < f y \ (f x = f y \ (x, y) \ measures fs))" unfolding measures_def by auto lemma measures_less: "f x < f y \ (x, y) \ measures (f#fs)" by simp lemma measures_lesseq: "f x \ f y \ (x, y) \ measures fs \ (x, y) \ measures (f#fs)" by auto subsubsection \Lifting Relations to Lists: one element\ definition listrel1 :: "('a \ 'a) set \ ('a list \ 'a list) set" where "listrel1 r = {(xs,ys). \us z z' vs. xs = us @ z # vs \ (z,z') \ r \ ys = us @ z' # vs}" lemma listrel1I: "\ (x, y) \ r; xs = us @ x # vs; ys = us @ y # vs \ \ (xs, ys) \ listrel1 r" unfolding listrel1_def by auto lemma listrel1E: "\ (xs, ys) \ listrel1 r; !!x y us vs. \ (x, y) \ r; xs = us @ x # vs; ys = us @ y # vs \ \ P \ \ P" unfolding listrel1_def by auto lemma not_Nil_listrel1 [iff]: "([], xs) \ listrel1 r" unfolding listrel1_def by blast lemma not_listrel1_Nil [iff]: "(xs, []) \ listrel1 r" unfolding listrel1_def by blast lemma Cons_listrel1_Cons [iff]: "(x # xs, y # ys) \ listrel1 r \ (x,y) \ r \ xs = ys \ x = y \ (xs, ys) \ listrel1 r" by (simp add: listrel1_def Cons_eq_append_conv) (blast) lemma listrel1I1: "(x,y) \ r \ (x # xs, y # xs) \ listrel1 r" by fast lemma listrel1I2: "(xs, ys) \ listrel1 r \ (x # xs, x # ys) \ listrel1 r" by fast lemma append_listrel1I: "(xs, ys) \ listrel1 r \ us = vs \ xs = ys \ (us, vs) \ listrel1 r \ (xs @ us, ys @ vs) \ listrel1 r" unfolding listrel1_def by auto (blast intro: append_eq_appendI)+ lemma Cons_listrel1E1[elim!]: assumes "(x # xs, ys) \ listrel1 r" and "\y. ys = y # xs \ (x, y) \ r \ R" and "\zs. 
ys = x # zs \ (xs, zs) \ listrel1 r \ R" shows R using assms by (cases ys) blast+ lemma Cons_listrel1E2[elim!]: assumes "(xs, y # ys) \ listrel1 r" and "\x. xs = x # ys \ (x, y) \ r \ R" and "\zs. xs = y # zs \ (zs, ys) \ listrel1 r \ R" shows R using assms by (cases xs) blast+ lemma snoc_listrel1_snoc_iff: "(xs @ [x], ys @ [y]) \ listrel1 r \ (xs, ys) \ listrel1 r \ x = y \ xs = ys \ (x,y) \ r" (is "?L \ ?R") proof assume ?L thus ?R by (fastforce simp: listrel1_def snoc_eq_iff_butlast butlast_append) next assume ?R then show ?L unfolding listrel1_def by force qed lemma listrel1_eq_len: "(xs,ys) \ listrel1 r \ length xs = length ys" unfolding listrel1_def by auto lemma listrel1_mono: "r \ s \ listrel1 r \ listrel1 s" unfolding listrel1_def by blast lemma listrel1_converse: "listrel1 (r\) = (listrel1 r)\" unfolding listrel1_def by blast lemma in_listrel1_converse: "(x,y) \ listrel1 (r\) \ (x,y) \ (listrel1 r)\" unfolding listrel1_def by blast lemma listrel1_iff_update: "(xs,ys) \ (listrel1 r) \ (\y n. (xs ! n, y) \ r \ n < length xs \ ys = xs[n:=y])" (is "?L \ ?R") proof assume "?L" then obtain x y u v where "xs = u @ x # v" "ys = u @ y # v" "(x,y) \ r" unfolding listrel1_def by auto then have "ys = xs[length u := y]" and "length u < length xs" and "(xs ! length u, y) \ r" by auto then show "?R" by auto next assume "?R" then obtain x y n where "(xs!n, y) \ r" "n < size xs" "ys = xs[n:=y]" "x = xs!n" by auto then obtain u v where "xs = u @ x # v" and "ys = u @ y # v" and "(x, y) \ r" by (auto intro: upd_conv_take_nth_drop id_take_nth_drop) then show "?L" by (auto simp: listrel1_def) qed text\Accessible part and wellfoundedness:\ lemma Cons_acc_listrel1I [intro!]: "x \ Wellfounded.acc r \ xs \ Wellfounded.acc (listrel1 r) \ (x # xs) \ Wellfounded.acc (listrel1 r)" apply (induct arbitrary: xs set: Wellfounded.acc) apply (erule thin_rl) apply (erule acc_induct) apply (rule accI) apply (blast) done lemma lists_accD: "xs \ lists (Wellfounded.acc r) \ xs \ Wellfounded.acc (listrel1 r)" proof (induct set: lists) case Nil then show ?case by (meson acc.intros not_listrel1_Nil) next case (Cons a l) then show ?case by blast qed lemma lists_accI: "xs \ Wellfounded.acc (listrel1 r) \ xs \ lists (Wellfounded.acc r)" apply (induct set: Wellfounded.acc) apply clarify apply (rule accI) apply (fastforce dest!: in_set_conv_decomp[THEN iffD1] simp: listrel1_def) done lemma wf_listrel1_iff[simp]: "wf(listrel1 r) = wf r" by (auto simp: wf_acc_iff intro: lists_accD lists_accI[THEN Cons_in_lists_iff[THEN iffD1, THEN conjunct1]]) subsubsection \Lifting Relations to Lists: all elements\ inductive_set listrel :: "('a \ 'b) set \ ('a list \ 'b list) set" for r :: "('a \ 'b) set" where Nil: "([],[]) \ listrel r" | Cons: "\(x,y) \ r; (xs,ys) \ listrel r\ \ (x#xs, y#ys) \ listrel r" inductive_cases listrel_Nil1 [elim!]: "([],xs) \ listrel r" inductive_cases listrel_Nil2 [elim!]: "(xs,[]) \ listrel r" inductive_cases listrel_Cons1 [elim!]: "(y#ys,xs) \ listrel r" inductive_cases listrel_Cons2 [elim!]: "(xs,y#ys) \ listrel r" lemma listrel_eq_len: "(xs, ys) \ listrel r \ length xs = length ys" by(induct rule: listrel.induct) auto lemma listrel_iff_zip [code_unfold]: "(xs,ys) \ listrel r \ length xs = length ys \ (\(x,y) \ set(zip xs ys). (x,y) \ r)" (is "?L \ ?R") proof assume ?L thus ?R by induct (auto intro: listrel_eq_len) next assume ?R thus ?L apply (clarify) by (induct rule: list_induct2) (auto intro: listrel.intros) qed lemma listrel_iff_nth: "(xs,ys) \ listrel r \ length xs = length ys \ (\n < length xs. 
(xs!n, ys!n) \ r)" (is "?L \ ?R") by (auto simp add: all_set_conv_all_nth listrel_iff_zip) lemma listrel_mono: "r \ s \ listrel r \ listrel s" by (meson listrel_iff_nth subrelI subset_eq) lemma listrel_subset: assumes "r \ A \ A" shows "listrel r \ lists A \ lists A" proof clarify show "a \ lists A \ b \ lists A" if "(a, b) \ listrel r" for a b using that assms by (induction rule: listrel.induct, auto) qed lemma listrel_refl_on: assumes "refl_on A r" shows "refl_on (lists A) (listrel r)" proof - have "l \ lists A \ (l, l) \ listrel r" for l using assms unfolding refl_on_def by (induction l, auto intro: listrel.intros) then show ?thesis by (meson assms listrel_subset refl_on_def) qed lemma listrel_sym: "sym r \ sym (listrel r)" by (simp add: listrel_iff_nth sym_def) lemma listrel_trans: assumes "trans r" shows "trans (listrel r)" proof - have "(x, z) \ listrel r" if "(x, y) \ listrel r" "(y, z) \ listrel r" for x y z using that proof induction case (Cons x y xs ys) then show ?case by clarsimp (metis assms listrel.Cons listrel_iff_nth transD) qed auto then show ?thesis using transI by blast qed theorem equiv_listrel: "equiv A r \ equiv (lists A) (listrel r)" by (simp add: equiv_def listrel_refl_on listrel_sym listrel_trans) lemma listrel_rtrancl_refl[iff]: "(xs,xs) \ listrel(r\<^sup>*)" using listrel_refl_on[of UNIV, OF refl_rtrancl] by(auto simp: refl_on_def) lemma listrel_rtrancl_trans: "\(xs,ys) \ listrel(r\<^sup>*); (ys,zs) \ listrel(r\<^sup>*)\ \ (xs,zs) \ listrel(r\<^sup>*)" by (metis listrel_trans trans_def trans_rtrancl) lemma listrel_Nil [simp]: "listrel r `` {[]} = {[]}" by (blast intro: listrel.intros) lemma listrel_Cons: "listrel r `` {x#xs} = set_Cons (r``{x}) (listrel r `` {xs})" by (auto simp add: set_Cons_def intro: listrel.intros) text \Relating \<^term>\listrel1\, \<^term>\listrel\ and closures:\ lemma listrel1_rtrancl_subset_rtrancl_listrel1: "listrel1 (r\<^sup>*) \ (listrel1 r)\<^sup>*" proof (rule subrelI) fix xs ys assume 1: "(xs,ys) \ listrel1 (r\<^sup>*)" { fix x y us vs have "(x,y) \ r\<^sup>* \ (us @ x # vs, us @ y # vs) \ (listrel1 r)\<^sup>*" proof(induct rule: rtrancl.induct) case rtrancl_refl show ?case by simp next case rtrancl_into_rtrancl thus ?case by (metis listrel1I rtrancl.rtrancl_into_rtrancl) qed } thus "(xs,ys) \ (listrel1 r)\<^sup>*" using 1 by(blast elim: listrel1E) qed lemma rtrancl_listrel1_eq_len: "(x,y) \ (listrel1 r)\<^sup>* \ length x = length y" by (induct rule: rtrancl.induct) (auto intro: listrel1_eq_len) lemma rtrancl_listrel1_ConsI1: "(xs,ys) \ (listrel1 r)\<^sup>* \ (x#xs,x#ys) \ (listrel1 r)\<^sup>*" proof (induction rule: rtrancl.induct) case (rtrancl_into_rtrancl a b c) then show ?case by (metis listrel1I2 rtrancl.rtrancl_into_rtrancl) qed auto lemma rtrancl_listrel1_ConsI2: "(x,y) \ r\<^sup>* \ (xs, ys) \ (listrel1 r)\<^sup>* \ (x # xs, y # ys) \ (listrel1 r)\<^sup>*" by (meson in_mono listrel1I1 listrel1_rtrancl_subset_rtrancl_listrel1 rtrancl_listrel1_ConsI1 rtrancl_trans) lemma listrel1_subset_listrel: "r \ r' \ refl r' \ listrel1 r \ listrel(r')" by(auto elim!: listrel1E simp add: listrel_iff_zip set_zip refl_on_def) lemma listrel_reflcl_if_listrel1: "(xs,ys) \ listrel1 r \ (xs,ys) \ listrel(r\<^sup>*)" by(erule listrel1E)(auto simp add: listrel_iff_zip set_zip) lemma listrel_rtrancl_eq_rtrancl_listrel1: "listrel (r\<^sup>*) = (listrel1 r)\<^sup>*" proof { fix x y assume "(x,y) \ listrel (r\<^sup>*)" then have "(x,y) \ (listrel1 r)\<^sup>*" by induct (auto intro: rtrancl_listrel1_ConsI2) } then show "listrel (r\<^sup>*) \ 
(listrel1 r)\<^sup>*" by (rule subrelI) next show "listrel (r\<^sup>*) \ (listrel1 r)\<^sup>*" proof(rule subrelI) fix xs ys assume "(xs,ys) \ (listrel1 r)\<^sup>*" then show "(xs,ys) \ listrel (r\<^sup>*)" proof induct case base show ?case by(auto simp add: listrel_iff_zip set_zip) next case (step ys zs) thus ?case by (metis listrel_reflcl_if_listrel1 listrel_rtrancl_trans) qed qed qed lemma rtrancl_listrel1_if_listrel: "(xs,ys) \ listrel r \ (xs,ys) \ (listrel1 r)\<^sup>*" by(metis listrel_rtrancl_eq_rtrancl_listrel1 subsetD[OF listrel_mono] r_into_rtrancl subsetI) lemma listrel_subset_rtrancl_listrel1: "listrel r \ (listrel1 r)\<^sup>*" by(fast intro:rtrancl_listrel1_if_listrel) subsection \Size function\ lemma [measure_function]: "is_measure f \ is_measure (size_list f)" by (rule is_measure_trivial) lemma [measure_function]: "is_measure f \ is_measure (size_option f)" by (rule is_measure_trivial) lemma size_list_estimation[termination_simp]: "x \ set xs \ y < f x \ y < size_list f xs" by (induct xs) auto lemma size_list_estimation'[termination_simp]: "x \ set xs \ y \ f x \ y \ size_list f xs" by (induct xs) auto lemma size_list_map[simp]: "size_list f (map g xs) = size_list (f \ g) xs" by (induct xs) auto lemma size_list_append[simp]: "size_list f (xs @ ys) = size_list f xs + size_list f ys" by (induct xs, auto) lemma size_list_pointwise[termination_simp]: "(\x. x \ set xs \ f x \ g x) \ size_list f xs \ size_list g xs" by (induct xs) force+ subsection \Monad operation\ definition bind :: "'a list \ ('a \ 'b list) \ 'b list" where "bind xs f = concat (map f xs)" hide_const (open) bind lemma bind_simps [simp]: "List.bind [] f = []" "List.bind (x # xs) f = f x @ List.bind xs f" by (simp_all add: bind_def) lemma list_bind_cong [fundef_cong]: assumes "xs = ys" "(\x. x \ set xs \ f x = g x)" shows "List.bind xs f = List.bind ys g" proof - from assms(2) have "List.bind xs f = List.bind xs g" by (induction xs) simp_all with assms(1) show ?thesis by simp qed lemma set_list_bind: "set (List.bind xs f) = (\x\set xs. set (f x))" by (induction xs) simp_all subsection \Code generation\ text\Optional tail recursive version of \<^const>\map\. Can avoid stack overflow in some target languages.\ fun map_tailrec_rev :: "('a \ 'b) \ 'a list \ 'b list \ 'b list" where "map_tailrec_rev f [] bs = bs" | "map_tailrec_rev f (a#as) bs = map_tailrec_rev f as (f a # bs)" lemma map_tailrec_rev: "map_tailrec_rev f as bs = rev(map f as) @ bs" by(induction as arbitrary: bs) simp_all definition map_tailrec :: "('a \ 'b) \ 'a list \ 'b list" where "map_tailrec f as = rev (map_tailrec_rev f as [])" text\Code equation:\ lemma map_eq_map_tailrec: "map = map_tailrec" by(simp add: fun_eq_iff map_tailrec_def map_tailrec_rev) subsubsection \Counterparts for set-related operations\ definition member :: "'a list \ 'a \ bool" where [code_abbrev]: "member xs x \ x \ set xs" text \ Use \member\ only for generating executable code. Otherwise use \<^prop>\x \ set xs\ instead --- it is much easier to reason about. \ lemma member_rec [code]: "member (x # xs) y \ x = y \ member xs y" "member [] y \ False" by (auto simp add: member_def) lemma in_set_member (* FIXME delete candidate *): "x \ set xs \ member xs x" by (simp add: member_def) lemmas list_all_iff [code_abbrev] = fun_cong[OF list.pred_set] definition list_ex :: "('a \ bool) \ 'a list \ bool" where list_ex_iff [code_abbrev]: "list_ex P xs \ Bex (set xs) P" definition list_ex1 :: "('a \ bool) \ 'a list \ bool" where list_ex1_iff [code_abbrev]: "list_ex1 P xs \ (\! x. 
x \ set xs \ P x)" text \ Usually you should prefer \\x\set xs\, \\x\set xs\ and \\!x. x\set xs \ _\ over \<^const>\list_all\, \<^const>\list_ex\ and \<^const>\list_ex1\ in specifications. \ lemma list_all_simps [code]: "list_all P (x # xs) \ P x \ list_all P xs" "list_all P [] \ True" by (simp_all add: list_all_iff) lemma list_ex_simps [simp, code]: "list_ex P (x # xs) \ P x \ list_ex P xs" "list_ex P [] \ False" by (simp_all add: list_ex_iff) lemma list_ex1_simps [simp, code]: "list_ex1 P [] = False" "list_ex1 P (x # xs) = (if P x then list_all (\y. \ P y \ x = y) xs else list_ex1 P xs)" by (auto simp add: list_ex1_iff list_all_iff) lemma Ball_set_list_all: (* FIXME delete candidate *) "Ball (set xs) P \ list_all P xs" by (simp add: list_all_iff) lemma Bex_set_list_ex: (* FIXME delete candidate *) "Bex (set xs) P \ list_ex P xs" by (simp add: list_ex_iff) lemma list_all_append [simp]: "list_all P (xs @ ys) \ list_all P xs \ list_all P ys" by (auto simp add: list_all_iff) lemma list_ex_append [simp]: "list_ex P (xs @ ys) \ list_ex P xs \ list_ex P ys" by (auto simp add: list_ex_iff) lemma list_all_rev [simp]: "list_all P (rev xs) \ list_all P xs" by (simp add: list_all_iff) lemma list_ex_rev [simp]: "list_ex P (rev xs) \ list_ex P xs" by (simp add: list_ex_iff) lemma list_all_length: "list_all P xs \ (\n < length xs. P (xs ! n))" by (auto simp add: list_all_iff set_conv_nth) lemma list_ex_length: "list_ex P xs \ (\n < length xs. P (xs ! n))" by (auto simp add: list_ex_iff set_conv_nth) lemmas list_all_cong [fundef_cong] = list.pred_cong lemma list_ex_cong [fundef_cong]: "xs = ys \ (\x. x \ set ys \ f x = g x) \ list_ex f xs = list_ex g ys" by (simp add: list_ex_iff) definition can_select :: "('a \ bool) \ 'a set \ bool" where [code_abbrev]: "can_select P A = (\!x\A. P x)" lemma can_select_set_list_ex1 [code]: "can_select P (set A) = list_ex1 P A" by (simp add: list_ex1_iff can_select_def) text \Executable checks for relations on sets\ definition listrel1p :: "('a \ 'a \ bool) \ 'a list \ 'a list \ bool" where "listrel1p r xs ys = ((xs, ys) \ listrel1 {(x, y). r x y})" lemma [code_unfold]: "(xs, ys) \ listrel1 r = listrel1p (\x y. (x, y) \ r) xs ys" unfolding listrel1p_def by auto lemma [code]: "listrel1p r [] xs = False" "listrel1p r xs [] = False" "listrel1p r (x # xs) (y # ys) \ r x y \ xs = ys \ x = y \ listrel1p r xs ys" by (simp add: listrel1p_def)+ definition lexordp :: "('a \ 'a \ bool) \ 'a list \ 'a list \ bool" where "lexordp r xs ys = ((xs, ys) \ lexord {(x, y). r x y})" lemma [code_unfold]: "(xs, ys) \ lexord r = lexordp (\x y. (x, y) \ r) xs ys" unfolding lexordp_def by auto lemma [code]: "lexordp r xs [] = False" "lexordp r [] (y#ys) = True" "lexordp r (x # xs) (y # ys) = (r x y \ (x = y \ lexordp r xs ys))" unfolding lexordp_def by auto text \Bounded quantification and summation over nats.\ lemma atMost_upto [code_unfold]: "{..n} = set [0..m (\m \ {0..m (\m \ {0..m\n::nat. P m) \ (\m \ {0..n}. P m)" by auto lemma ex_nat_less [code_unfold]: "(\m\n::nat. P m) \ (\m \ {0..n}. P m)" by auto text\Bounded \LEAST\ operator:\ definition "Bleast S P = (LEAST x. x \ S \ P x)" definition "abort_Bleast S P = (LEAST x. x \ S \ P x)" declare [[code abort: abort_Bleast]] lemma Bleast_code [code]: "Bleast (set xs) P = (case filter P (sort xs) of x#xs \ x | [] \ abort_Bleast (set xs) P)" proof (cases "filter P (sort xs)") case Nil thus ?thesis by (simp add: Bleast_def abort_Bleast_def) next case (Cons x ys) have "(LEAST x. 
x \ set xs \ P x) = x" proof (rule Least_equality) show "x \ set xs \ P x" by (metis Cons Cons_eq_filter_iff in_set_conv_decomp set_sort) next fix y assume "y \ set xs \ P y" hence "y \ set (filter P xs)" by auto thus "x \ y" by (metis Cons eq_iff filter_sort set_ConsD set_sort sorted_wrt.simps(2) sorted_sort) qed thus ?thesis using Cons by (simp add: Bleast_def) qed declare Bleast_def[symmetric, code_unfold] text \Summation over ints.\ lemma greaterThanLessThan_upto [code_unfold]: "{i<..Optimizing by rewriting\ definition null :: "'a list \ bool" where [code_abbrev]: "null xs \ xs = []" text \ Efficient emptyness check is implemented by \<^const>\null\. \ lemma null_rec [code]: "null (x # xs) \ False" "null [] \ True" by (simp_all add: null_def) lemma eq_Nil_null: (* FIXME delete candidate *) "xs = [] \ null xs" by (simp add: null_def) lemma equal_Nil_null [code_unfold]: "HOL.equal xs [] \ null xs" "HOL.equal [] = null" by (auto simp add: equal null_def) definition maps :: "('a \ 'b list) \ 'a list \ 'b list" where [code_abbrev]: "maps f xs = concat (map f xs)" definition map_filter :: "('a \ 'b option) \ 'a list \ 'b list" where [code_post]: "map_filter f xs = map (the \ f) (filter (\x. f x \ None) xs)" text \ Operations \<^const>\maps\ and \<^const>\map_filter\ avoid intermediate lists on execution -- do not use for proving. \ lemma maps_simps [code]: "maps f (x # xs) = f x @ maps f xs" "maps f [] = []" by (simp_all add: maps_def) lemma map_filter_simps [code]: "map_filter f (x # xs) = (case f x of None \ map_filter f xs | Some y \ y # map_filter f xs)" "map_filter f [] = []" by (simp_all add: map_filter_def split: option.split) lemma concat_map_maps: (* FIXME delete candidate *) "concat (map f xs) = maps f xs" by (simp add: maps_def) lemma map_filter_map_filter [code_unfold]: "map f (filter P xs) = map_filter (\x. if P x then Some (f x) else None) xs" by (simp add: map_filter_def) text \Optimized code for \\i\{a..b::int}\ and \\n:{a.. and similiarly for \\\.\ definition all_interval_nat :: "(nat \ bool) \ nat \ nat \ bool" where "all_interval_nat P i j \ (\n \ {i.. i \ j \ P i \ all_interval_nat P (Suc i) j" proof - have *: "\n. P i \ \n\{Suc i.. i \ n \ n < j \ P n" proof - fix n assume "P i" "\n\{Suc i.. n" "n < j" then show "P n" by (cases "n = i") simp_all qed show ?thesis by (auto simp add: all_interval_nat_def intro: *) qed lemma list_all_iff_all_interval_nat [code_unfold]: "list_all P [i.. all_interval_nat P i j" by (simp add: list_all_iff all_interval_nat_def) lemma list_ex_iff_not_all_inverval_nat [code_unfold]: "list_ex P [i.. \ (all_interval_nat (Not \ P) i j)" by (simp add: list_ex_iff all_interval_nat_def) definition all_interval_int :: "(int \ bool) \ int \ int \ bool" where "all_interval_int P i j \ (\k \ {i..j}. P k)" lemma [code]: "all_interval_int P i j \ i > j \ P i \ all_interval_int P (i + 1) j" proof - have *: "\k. P i \ \k\{i+1..j}. P k \ i \ k \ k \ j \ P k" proof - fix k assume "P i" "\k\{i+1..j}. 
P k" "i \ k" "k \ j" then show "P k" by (cases "k = i") simp_all qed show ?thesis by (auto simp add: all_interval_int_def intro: *) qed lemma list_all_iff_all_interval_int [code_unfold]: "list_all P [i..j] \ all_interval_int P i j" by (simp add: list_all_iff all_interval_int_def) lemma list_ex_iff_not_all_inverval_int [code_unfold]: "list_ex P [i..j] \ \ (all_interval_int (Not \ P) i j)" by (simp add: list_ex_iff all_interval_int_def) text \optimized code (tail-recursive) for \<^term>\length\\ definition gen_length :: "nat \ 'a list \ nat" where "gen_length n xs = n + length xs" lemma gen_length_code [code]: "gen_length n [] = n" "gen_length n (x # xs) = gen_length (Suc n) xs" by(simp_all add: gen_length_def) declare list.size(3-4)[code del] lemma length_code [code]: "length = gen_length 0" by(simp add: gen_length_def fun_eq_iff) hide_const (open) member null maps map_filter all_interval_nat all_interval_int gen_length subsubsection \Pretty lists\ ML \ (* Code generation for list literals. *) signature LIST_CODE = sig val add_literal_list: string -> theory -> theory end; structure List_Code : LIST_CODE = struct open Basic_Code_Thingol; fun implode_list t = let fun dest_cons (IConst { sym = Code_Symbol.Constant \<^const_name>\Cons\, ... } `$ t1 `$ t2) = SOME (t1, t2) | dest_cons _ = NONE; val (ts, t') = Code_Thingol.unfoldr dest_cons t; in case t' of IConst { sym = Code_Symbol.Constant \<^const_name>\Nil\, ... } => SOME ts | _ => NONE end; fun print_list (target_fxy, target_cons) pr fxy t1 t2 = Code_Printer.brackify_infix (target_fxy, Code_Printer.R) fxy ( pr (Code_Printer.INFX (target_fxy, Code_Printer.X)) t1, Code_Printer.str target_cons, pr (Code_Printer.INFX (target_fxy, Code_Printer.R)) t2 ); fun add_literal_list target = let fun pretty literals pr _ vars fxy [(t1, _), (t2, _)] = case Option.map (cons t1) (implode_list t2) of SOME ts => Code_Printer.literal_list literals (map (pr vars Code_Printer.NOBR) ts) | NONE => print_list (Code_Printer.infix_cons literals) (pr vars) fxy t1 t2; in Code_Target.set_printings (Code_Symbol.Constant (\<^const_name>\Cons\, [(target, SOME (Code_Printer.complex_const_syntax (2, pretty)))])) end end; \ code_printing type_constructor list \ (SML) "_ list" and (OCaml) "_ list" and (Haskell) "![(_)]" and (Scala) "List[(_)]" | constant Nil \ (SML) "[]" and (OCaml) "[]" and (Haskell) "[]" and (Scala) "!Nil" | class_instance list :: equal \ (Haskell) - | constant "HOL.equal :: 'a list \ 'a list \ bool" \ (Haskell) infix 4 "==" setup \fold (List_Code.add_literal_list) ["SML", "OCaml", "Haskell", "Scala"]\ code_reserved SML list code_reserved OCaml list subsubsection \Use convenient predefined operations\ code_printing constant "(@)" \ (SML) infixr 7 "@" and (OCaml) infixr 6 "@" and (Haskell) infixr 5 "++" and (Scala) infixl 7 "++" | constant map \ (Haskell) "map" | constant filter \ (Haskell) "filter" | constant concat \ (Haskell) "concat" | constant List.maps \ (Haskell) "concatMap" | constant rev \ (Haskell) "reverse" | constant zip \ (Haskell) "zip" | constant List.null \ (Haskell) "null" | constant takeWhile \ (Haskell) "takeWhile" | constant dropWhile \ (Haskell) "dropWhile" | constant list_all \ (Haskell) "all" | constant list_ex \ (Haskell) "any" subsubsection \Implementation of sets by lists\ lemma is_empty_set [code]: "Set.is_empty (set xs) \ List.null xs" by (simp add: Set.is_empty_def null_def) lemma empty_set [code]: "{} = set []" by simp lemma UNIV_coset [code]: "UNIV = List.coset []" by simp lemma compl_set [code]: "- set xs = List.coset xs" by simp 
lemma compl_coset [code]: "- List.coset xs = set xs" by simp lemma [code]: "x \ set xs \ List.member xs x" "x \ List.coset xs \ \ List.member xs x" by (simp_all add: member_def) lemma insert_code [code]: "insert x (set xs) = set (List.insert x xs)" "insert x (List.coset xs) = List.coset (removeAll x xs)" by simp_all lemma remove_code [code]: "Set.remove x (set xs) = set (removeAll x xs)" "Set.remove x (List.coset xs) = List.coset (List.insert x xs)" by (simp_all add: remove_def Compl_insert) lemma filter_set [code]: "Set.filter P (set xs) = set (filter P xs)" by auto lemma image_set [code]: "image f (set xs) = set (map f xs)" by simp lemma subset_code [code]: "set xs \ B \ (\x\set xs. x \ B)" "A \ List.coset ys \ (\y\set ys. y \ A)" "List.coset [] \ set [] \ False" by auto text \A frequent case -- avoid intermediate sets\ lemma [code_unfold]: "set xs \ set ys \ list_all (\x. x \ set ys) xs" by (auto simp: list_all_iff) lemma Ball_set [code]: "Ball (set xs) P \ list_all P xs" by (simp add: list_all_iff) lemma Bex_set [code]: "Bex (set xs) P \ list_ex P xs" by (simp add: list_ex_iff) lemma card_set [code]: "card (set xs) = length (remdups xs)" proof - have "card (set (remdups xs)) = length (remdups xs)" by (rule distinct_card) simp then show ?thesis by simp qed lemma the_elem_set [code]: "the_elem (set [x]) = x" by simp lemma Pow_set [code]: "Pow (set []) = {{}}" "Pow (set (x # xs)) = (let A = Pow (set xs) in A \ insert x ` A)" by (simp_all add: Pow_insert Let_def) definition map_project :: "('a \ 'b option) \ 'a set \ 'b set" where "map_project f A = {b. \ a \ A. f a = Some b}" lemma [code]: "map_project f (set xs) = set (List.map_filter f xs)" by (auto simp add: map_project_def map_filter_def image_def) hide_const (open) map_project text \Operations on relations\ lemma product_code [code]: "Product_Type.product (set xs) (set ys) = set [(x, y). x \ xs, y \ ys]" by (auto simp add: Product_Type.product_def) lemma Id_on_set [code]: "Id_on (set xs) = set [(x, x). x \ xs]" by (auto simp add: Id_on_def) lemma [code]: "R `` S = List.map_project (\(x, y). if x \ S then Some y else None) R" unfolding map_project_def by (auto split: prod.split if_split_asm) lemma trancl_set_ntrancl [code]: "trancl (set xs) = ntrancl (card (set xs) - 1) (set xs)" by (simp add: finite_trancl_ntranl) lemma set_relcomp [code]: "set xys O set yzs = set ([(fst xy, snd yz). xy \ xys, yz \ yzs, snd xy = fst yz])" by auto (auto simp add: Bex_def image_def) lemma wf_set [code]: "wf (set xs) = acyclic (set xs)" by (simp add: wf_iff_acyclic_if_finite) subsection \Setup for Lifting/Transfer\ subsubsection \Transfer rules for the Transfer package\ context includes lifting_syntax begin lemma tl_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A) tl tl" unfolding tl_def[abs_def] by transfer_prover lemma butlast_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A) butlast butlast" by (rule rel_funI, erule list_all2_induct, auto) lemma map_rec: "map f xs = rec_list Nil (%x _ y. 
Cons (f x) y) xs" by (induct xs) auto lemma append_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A ===> list_all2 A) append append" unfolding List.append_def by transfer_prover lemma rev_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A) rev rev" unfolding List.rev_def by transfer_prover lemma filter_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> list_all2 A) filter filter" unfolding List.filter_def by transfer_prover lemma fold_transfer [transfer_rule]: "((A ===> B ===> B) ===> list_all2 A ===> B ===> B) fold fold" unfolding List.fold_def by transfer_prover lemma foldr_transfer [transfer_rule]: "((A ===> B ===> B) ===> list_all2 A ===> B ===> B) foldr foldr" unfolding List.foldr_def by transfer_prover lemma foldl_transfer [transfer_rule]: "((B ===> A ===> B) ===> B ===> list_all2 A ===> B) foldl foldl" unfolding List.foldl_def by transfer_prover lemma concat_transfer [transfer_rule]: "(list_all2 (list_all2 A) ===> list_all2 A) concat concat" unfolding List.concat_def by transfer_prover lemma drop_transfer [transfer_rule]: "((=) ===> list_all2 A ===> list_all2 A) drop drop" unfolding List.drop_def by transfer_prover lemma take_transfer [transfer_rule]: "((=) ===> list_all2 A ===> list_all2 A) take take" unfolding List.take_def by transfer_prover lemma list_update_transfer [transfer_rule]: "(list_all2 A ===> (=) ===> A ===> list_all2 A) list_update list_update" unfolding list_update_def by transfer_prover lemma takeWhile_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> list_all2 A) takeWhile takeWhile" unfolding takeWhile_def by transfer_prover lemma dropWhile_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> list_all2 A) dropWhile dropWhile" unfolding dropWhile_def by transfer_prover lemma zip_transfer [transfer_rule]: "(list_all2 A ===> list_all2 B ===> list_all2 (rel_prod A B)) zip zip" unfolding zip_def by transfer_prover lemma product_transfer [transfer_rule]: "(list_all2 A ===> list_all2 B ===> list_all2 (rel_prod A B)) List.product List.product" unfolding List.product_def by transfer_prover lemma product_lists_transfer [transfer_rule]: "(list_all2 (list_all2 A) ===> list_all2 (list_all2 A)) product_lists product_lists" unfolding product_lists_def by transfer_prover lemma insert_transfer [transfer_rule]: assumes [transfer_rule]: "bi_unique A" shows "(A ===> list_all2 A ===> list_all2 A) List.insert List.insert" unfolding List.insert_def [abs_def] by transfer_prover lemma find_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> rel_option A) List.find List.find" unfolding List.find_def by transfer_prover lemma those_transfer [transfer_rule]: "(list_all2 (rel_option P) ===> rel_option (list_all2 P)) those those" unfolding List.those_def by transfer_prover lemma remove1_transfer [transfer_rule]: assumes [transfer_rule]: "bi_unique A" shows "(A ===> list_all2 A ===> list_all2 A) remove1 remove1" unfolding remove1_def by transfer_prover lemma removeAll_transfer [transfer_rule]: assumes [transfer_rule]: "bi_unique A" shows "(A ===> list_all2 A ===> list_all2 A) removeAll removeAll" unfolding removeAll_def by transfer_prover lemma successively_transfer [transfer_rule]: "((A ===> A ===> (=)) ===> list_all2 A ===> (=)) successively successively" unfolding successively_altdef by transfer_prover lemma distinct_transfer [transfer_rule]: assumes [transfer_rule]: "bi_unique A" shows "(list_all2 A ===> (=)) distinct distinct" unfolding distinct_def by transfer_prover lemma distinct_adj_transfer [transfer_rule]: assumes "bi_unique 
A" shows "(list_all2 A ===> (=)) distinct_adj distinct_adj" unfolding rel_fun_def proof (intro allI impI) fix xs ys assume "list_all2 A xs ys" thus "distinct_adj xs \ distinct_adj ys" proof (induction rule: list_all2_induct) case (Cons x xs y ys) note * = this show ?case proof (cases xs) case [simp]: (Cons x' xs') with * obtain y' ys' where [simp]: "ys = y' # ys'" by (cases ys) auto from * show ?thesis using assms by (auto simp: distinct_adj_Cons bi_unique_def) qed (use * in auto) qed auto qed lemma remdups_transfer [transfer_rule]: assumes [transfer_rule]: "bi_unique A" shows "(list_all2 A ===> list_all2 A) remdups remdups" unfolding remdups_def by transfer_prover lemma remdups_adj_transfer [transfer_rule]: assumes [transfer_rule]: "bi_unique A" shows "(list_all2 A ===> list_all2 A) remdups_adj remdups_adj" proof (rule rel_funI, erule list_all2_induct) qed (auto simp: remdups_adj_Cons assms[unfolded bi_unique_def] split: list.splits) lemma replicate_transfer [transfer_rule]: "((=) ===> A ===> list_all2 A) replicate replicate" unfolding replicate_def by transfer_prover lemma length_transfer [transfer_rule]: "(list_all2 A ===> (=)) length length" unfolding size_list_overloaded_def size_list_def by transfer_prover lemma rotate1_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A) rotate1 rotate1" unfolding rotate1_def by transfer_prover lemma rotate_transfer [transfer_rule]: "((=) ===> list_all2 A ===> list_all2 A) rotate rotate" unfolding rotate_def [abs_def] by transfer_prover lemma nths_transfer [transfer_rule]: "(list_all2 A ===> rel_set (=) ===> list_all2 A) nths nths" unfolding nths_def [abs_def] by transfer_prover lemma subseqs_transfer [transfer_rule]: "(list_all2 A ===> list_all2 (list_all2 A)) subseqs subseqs" unfolding subseqs_def [abs_def] by transfer_prover lemma partition_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> rel_prod (list_all2 A) (list_all2 A)) partition partition" unfolding partition_def by transfer_prover lemma lists_transfer [transfer_rule]: "(rel_set A ===> rel_set (list_all2 A)) lists lists" proof (rule rel_funI, rule rel_setI) show "\l \ lists X; rel_set A X Y\ \ \y\lists Y. list_all2 A l y" for X Y l proof (induction l rule: lists.induct) case (Cons a l) then show ?case by (simp only: rel_set_def list_all2_Cons1, metis lists.Cons) qed auto show "\l \ lists Y; rel_set A X Y\ \ \x\lists X. 
list_all2 A x l" for X Y l proof (induction l rule: lists.induct) case (Cons a l) then show ?case by (simp only: rel_set_def list_all2_Cons2, metis lists.Cons) qed auto qed lemma set_Cons_transfer [transfer_rule]: "(rel_set A ===> rel_set (list_all2 A) ===> rel_set (list_all2 A)) set_Cons set_Cons" unfolding rel_fun_def rel_set_def set_Cons_def by (fastforce simp add: list_all2_Cons1 list_all2_Cons2) lemma listset_transfer [transfer_rule]: "(list_all2 (rel_set A) ===> rel_set (list_all2 A)) listset listset" unfolding listset_def by transfer_prover lemma null_transfer [transfer_rule]: "(list_all2 A ===> (=)) List.null List.null" unfolding rel_fun_def List.null_def by auto lemma list_all_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> (=)) list_all list_all" unfolding list_all_iff [abs_def] by transfer_prover lemma list_ex_transfer [transfer_rule]: "((A ===> (=)) ===> list_all2 A ===> (=)) list_ex list_ex" unfolding list_ex_iff [abs_def] by transfer_prover lemma splice_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A ===> list_all2 A) splice splice" apply (rule rel_funI, erule list_all2_induct, simp add: rel_fun_def, simp) apply (rule rel_funI) apply (erule_tac xs=x in list_all2_induct, simp, simp add: rel_fun_def) done lemma shuffles_transfer [transfer_rule]: "(list_all2 A ===> list_all2 A ===> rel_set (list_all2 A)) shuffles shuffles" proof (intro rel_funI, goal_cases) case (1 xs xs' ys ys') thus ?case proof (induction xs ys arbitrary: xs' ys' rule: shuffles.induct) case (3 x xs y ys xs' ys') from "3.prems" obtain x' xs'' where xs': "xs' = x' # xs''" by (cases xs') auto from "3.prems" obtain y' ys'' where ys': "ys' = y' # ys''" by (cases ys') auto have [transfer_rule]: "A x x'" "A y y'" "list_all2 A xs xs''" "list_all2 A ys ys''" using "3.prems" by (simp_all add: xs' ys') have [transfer_rule]: "rel_set (list_all2 A) (shuffles xs (y # ys)) (shuffles xs'' ys')" and [transfer_rule]: "rel_set (list_all2 A) (shuffles (x # xs) ys) (shuffles xs' ys'')" using "3.prems" by (auto intro!: "3.IH" simp: xs' ys') have "rel_set (list_all2 A) ((#) x ` shuffles xs (y # ys) \ (#) y ` shuffles (x # xs) ys) ((#) x' ` shuffles xs'' ys' \ (#) y' ` shuffles xs' ys'')" by transfer_prover thus ?case by (simp add: xs' ys') qed (auto simp: rel_set_def) qed lemma rtrancl_parametric [transfer_rule]: assumes [transfer_rule]: "bi_unique A" "bi_total A" shows "(rel_set (rel_prod A A) ===> rel_set (rel_prod A A)) rtrancl rtrancl" unfolding rtrancl_def by transfer_prover lemma monotone_parametric [transfer_rule]: assumes [transfer_rule]: "bi_total A" shows "((A ===> A ===> (=)) ===> (B ===> B ===> (=)) ===> (A ===> B) ===> (=)) monotone monotone" unfolding monotone_def[abs_def] by transfer_prover lemma fun_ord_parametric [transfer_rule]: assumes [transfer_rule]: "bi_total C" shows "((A ===> B ===> (=)) ===> (C ===> A) ===> (C ===> B) ===> (=)) fun_ord fun_ord" unfolding fun_ord_def[abs_def] by transfer_prover lemma fun_lub_parametric [transfer_rule]: assumes [transfer_rule]: "bi_total A" "bi_unique A" shows "((rel_set A ===> B) ===> rel_set (C ===> A) ===> C ===> B) fun_lub fun_lub" unfolding fun_lub_def[abs_def] by transfer_prover end end diff --git a/src/HOL/Map.thy b/src/HOL/Map.thy --- a/src/HOL/Map.thy +++ b/src/HOL/Map.thy @@ -1,942 +1,946 @@ (* Title: HOL/Map.thy Author: Tobias Nipkow, based on a theory by David von Oheimb Copyright 1997-2003 TU Muenchen The datatype of "maps"; strongly resembles maps in VDM. 
*) section \Maps\ theory Map imports List abbrevs "(=" = "\\<^sub>m" begin type_synonym ('a, 'b) "map" = "'a \ 'b option" (infixr "\" 0) abbreviation empty :: "'a \ 'b" where "empty \ \x. None" definition map_comp :: "('b \ 'c) \ ('a \ 'b) \ ('a \ 'c)" (infixl "\\<^sub>m" 55) where "f \\<^sub>m g = (\k. case g k of None \ None | Some v \ f v)" definition map_add :: "('a \ 'b) \ ('a \ 'b) \ ('a \ 'b)" (infixl "++" 100) where "m1 ++ m2 = (\x. case m2 x of None \ m1 x | Some y \ Some y)" definition restrict_map :: "('a \ 'b) \ 'a set \ ('a \ 'b)" (infixl "|`" 110) where "m|`A = (\x. if x \ A then m x else None)" notation (latex output) restrict_map ("_\\<^bsub>_\<^esub>" [111,110] 110) definition dom :: "('a \ 'b) \ 'a set" where "dom m = {a. m a \ None}" definition ran :: "('a \ 'b) \ 'b set" where "ran m = {b. \a. m a = Some b}" definition graph :: "('a \ 'b) \ ('a \ 'b) set" where "graph m = {(a, b) | a b. m a = Some b}" definition map_le :: "('a \ 'b) \ ('a \ 'b) \ bool" (infix "\\<^sub>m" 50) where "(m\<^sub>1 \\<^sub>m m\<^sub>2) \ (\a \ dom m\<^sub>1. m\<^sub>1 a = m\<^sub>2 a)" nonterminal maplets and maplet syntax "_maplet" :: "['a, 'a] \ maplet" ("_ /\/ _") "_maplets" :: "['a, 'a] \ maplet" ("_ /[\]/ _") "" :: "maplet \ maplets" ("_") "_Maplets" :: "[maplet, maplets] \ maplets" ("_,/ _") "_MapUpd" :: "['a \ 'b, maplets] \ 'a \ 'b" ("_/'(_')" [900, 0] 900) "_Map" :: "maplets \ 'a \ 'b" ("(1[_])") syntax (ASCII) "_maplet" :: "['a, 'a] \ maplet" ("_ /|->/ _") "_maplets" :: "['a, 'a] \ maplet" ("_ /[|->]/ _") translations "_MapUpd m (_Maplets xy ms)" \ "_MapUpd (_MapUpd m xy) ms" "_MapUpd m (_maplet x y)" \ "m(x := CONST Some y)" "_Map ms" \ "_MapUpd (CONST empty) ms" "_Map (_Maplets ms1 ms2)" \ "_MapUpd (_Map ms1) ms2" "_Maplets ms1 (_Maplets ms2 ms3)" \ "_Maplets (_Maplets ms1 ms2) ms3" primrec map_of :: "('a \ 'b) list \ 'a \ 'b" where "map_of [] = empty" | "map_of (p # ps) = (map_of ps)(fst p \ snd p)" definition map_upds :: "('a \ 'b) \ 'a list \ 'b list \ 'a \ 'b" where "map_upds m xs ys = m ++ map_of (rev (zip xs ys))" translations "_MapUpd m (_maplets x y)" \ "CONST map_upds m x y" lemma map_of_Cons_code [code]: "map_of [] k = None" "map_of ((l, v) # ps) k = (if l = k then Some v else map_of ps k)" by simp_all subsection \@{term [source] empty}\ lemma empty_upd_none [simp]: "empty(x := None) = empty" by (rule ext) simp subsection \@{term [source] map_upd}\ lemma map_upd_triv: "t k = Some x \ t(k\x) = t" by (rule ext) simp lemma map_upd_nonempty [simp]: "t(k\x) \ empty" proof assume "t(k \ x) = empty" then have "(t(k \ x)) k = None" by simp then show False by simp qed lemma map_upd_eqD1: assumes "m(a\x) = n(a\y)" shows "x = y" proof - from assms have "(m(a\x)) a = (n(a\y)) a" by simp then show ?thesis by simp qed lemma map_upd_Some_unfold: "((m(a\b)) x = Some y) = (x = a \ b = y \ x \ a \ m x = Some y)" by auto lemma image_map_upd [simp]: "x \ A \ m(x \ y) ` A = m ` A" by auto lemma finite_range_updI: assumes "finite (range f)" shows "finite (range (f(a\b)))" proof - have "range (f(a\b)) \ insert (Some b) (range f)" by auto then show ?thesis by (rule finite_subset) (use assms in auto) qed subsection \@{term [source] map_of}\ lemma map_of_eq_empty_iff [simp]: "map_of xys = empty \ xys = []" proof show "map_of xys = empty \ xys = []" by (induction xys) simp_all qed simp lemma empty_eq_map_of_iff [simp]: "empty = map_of xys \ xys = []" by(subst eq_commute) simp lemma map_of_eq_None_iff: "(map_of xys x = None) = (x \ fst ` (set xys))" by (induct xys) simp_all lemma map_of_eq_Some_iff 
[simp]: "distinct(map fst xys) \ (map_of xys x = Some y) = ((x,y) \ set xys)" proof (induct xys) case (Cons xy xys) then show ?case by (cases xy) (auto simp flip: map_of_eq_None_iff) qed auto lemma Some_eq_map_of_iff [simp]: "distinct(map fst xys) \ (Some y = map_of xys x) = ((x,y) \ set xys)" by (auto simp del: map_of_eq_Some_iff simp: map_of_eq_Some_iff [symmetric]) lemma map_of_is_SomeI [simp]: "\distinct(map fst xys); (x,y) \ set xys\ \ map_of xys x = Some y" by simp lemma map_of_zip_is_None [simp]: "length xs = length ys \ (map_of (zip xs ys) x = None) = (x \ set xs)" by (induct rule: list_induct2) simp_all lemma map_of_zip_is_Some: assumes "length xs = length ys" shows "x \ set xs \ (\y. map_of (zip xs ys) x = Some y)" using assms by (induct rule: list_induct2) simp_all lemma map_of_zip_upd: fixes x :: 'a and xs :: "'a list" and ys zs :: "'b list" assumes "length ys = length xs" and "length zs = length xs" and "x \ set xs" and "map_of (zip xs ys)(x \ y) = map_of (zip xs zs)(x \ z)" shows "map_of (zip xs ys) = map_of (zip xs zs)" proof fix x' :: 'a show "map_of (zip xs ys) x' = map_of (zip xs zs) x'" proof (cases "x = x'") case True from assms True map_of_zip_is_None [of xs ys x'] have "map_of (zip xs ys) x' = None" by simp moreover from assms True map_of_zip_is_None [of xs zs x'] have "map_of (zip xs zs) x' = None" by simp ultimately show ?thesis by simp next case False from assms have "(map_of (zip xs ys)(x \ y)) x' = (map_of (zip xs zs)(x \ z)) x'" by auto with False show ?thesis by simp qed qed lemma map_of_zip_inject: assumes "length ys = length xs" and "length zs = length xs" and dist: "distinct xs" and map_of: "map_of (zip xs ys) = map_of (zip xs zs)" shows "ys = zs" using assms(1) assms(2)[symmetric] using dist map_of proof (induct ys xs zs rule: list_induct3) case Nil show ?case by simp next case (Cons y ys x xs z zs) from \map_of (zip (x#xs) (y#ys)) = map_of (zip (x#xs) (z#zs))\ have map_of: "map_of (zip xs ys)(x \ y) = map_of (zip xs zs)(x \ z)" by simp from Cons have "length ys = length xs" and "length zs = length xs" and "x \ set xs" by simp_all then have "map_of (zip xs ys) = map_of (zip xs zs)" using map_of by (rule map_of_zip_upd) with Cons.hyps \distinct (x # xs)\ have "ys = zs" by simp moreover from map_of have "y = z" by (rule map_upd_eqD1) ultimately show ?case by simp qed lemma map_of_zip_nth: assumes "length xs = length ys" assumes "distinct xs" assumes "i < length ys" shows "map_of (zip xs ys) (xs ! i) = Some (ys ! i)" using assms proof (induct arbitrary: i rule: list_induct2) case Nil then show ?case by simp next case (Cons x xs y ys) then show ?case using less_Suc_eq_0_disj by auto qed lemma map_of_zip_map: "map_of (zip xs (map f xs)) = (\x. if x \ set xs then Some (f x) else None)" by (induct xs) (simp_all add: fun_eq_iff) lemma finite_range_map_of: "finite (range (map_of xys))" proof (induct xys) case (Cons a xys) then show ?case using finite_range_updI by fastforce qed auto lemma map_of_SomeD: "map_of xs k = Some y \ (k, y) \ set xs" by (induct xs) (auto split: if_splits) lemma map_of_mapk_SomeI: "inj f \ map_of t k = Some x \ map_of (map (case_prod (\k. Pair (f k))) t) (f k) = Some x" by (induct t) (auto simp: inj_eq) lemma weak_map_of_SomeI: "(k, x) \ set l \ \x. map_of l k = Some x" by (induct l) auto lemma map_of_filter_in: "map_of xs k = Some z \ P k z \ map_of (filter (case_prod P) xs) k = Some z" by (induct xs) auto lemma map_of_map: "map_of (map (\(k, v). 
(k, f v)) xs) = map_option f \ map_of xs" by (induct xs) (auto simp: fun_eq_iff) lemma dom_map_option: "dom (\k. map_option (f k) (m k)) = dom m" by (simp add: dom_def) lemma dom_map_option_comp [simp]: "dom (map_option g \ m) = dom m" using dom_map_option [of "\_. g" m] by (simp add: comp_def) subsection \\<^const>\map_option\ related\ lemma map_option_o_empty [simp]: "map_option f \ empty = empty" by (rule ext) simp lemma map_option_o_map_upd [simp]: "map_option f \ m(a\b) = (map_option f \ m)(a\f b)" by (rule ext) simp subsection \@{term [source] map_comp} related\ lemma map_comp_empty [simp]: "m \\<^sub>m empty = empty" "empty \\<^sub>m m = empty" by (auto simp: map_comp_def split: option.splits) lemma map_comp_simps [simp]: "m2 k = None \ (m1 \\<^sub>m m2) k = None" "m2 k = Some k' \ (m1 \\<^sub>m m2) k = m1 k'" by (auto simp: map_comp_def) lemma map_comp_Some_iff: "((m1 \\<^sub>m m2) k = Some v) = (\k'. m2 k = Some k' \ m1 k' = Some v)" by (auto simp: map_comp_def split: option.splits) lemma map_comp_None_iff: "((m1 \\<^sub>m m2) k = None) = (m2 k = None \ (\k'. m2 k = Some k' \ m1 k' = None)) " by (auto simp: map_comp_def split: option.splits) subsection \\++\\ lemma map_add_empty[simp]: "m ++ empty = m" by(simp add: map_add_def) lemma empty_map_add[simp]: "empty ++ m = m" by (rule ext) (simp add: map_add_def split: option.split) lemma map_add_assoc[simp]: "m1 ++ (m2 ++ m3) = (m1 ++ m2) ++ m3" by (rule ext) (simp add: map_add_def split: option.split) lemma map_add_Some_iff: "((m ++ n) k = Some x) = (n k = Some x \ n k = None \ m k = Some x)" by (simp add: map_add_def split: option.split) lemma map_add_SomeD [dest!]: "(m ++ n) k = Some x \ n k = Some x \ n k = None \ m k = Some x" by (rule map_add_Some_iff [THEN iffD1]) lemma map_add_find_right [simp]: "n k = Some xx \ (m ++ n) k = Some xx" by (subst map_add_Some_iff) fast lemma map_add_None [iff]: "((m ++ n) k = None) = (n k = None \ m k = None)" by (simp add: map_add_def split: option.split) lemma map_add_upd[simp]: "f ++ g(x\y) = (f ++ g)(x\y)" by (rule ext) (simp add: map_add_def) lemma map_add_upds[simp]: "m1 ++ (m2(xs[\]ys)) = (m1++m2)(xs[\]ys)" by (simp add: map_upds_def) lemma map_add_upd_left: "m\dom e2 \ e1(m \ u1) ++ e2 = (e1 ++ e2)(m \ u1)" by (rule ext) (auto simp: map_add_def dom_def split: option.split) lemma map_of_append[simp]: "map_of (xs @ ys) = map_of ys ++ map_of xs" unfolding map_add_def proof (induct xs) case (Cons a xs) then show ?case by (force split: option.split) qed auto lemma finite_range_map_of_map_add: "finite (range f) \ finite (range (f ++ map_of l))" proof (induct l) case (Cons a l) then show ?case by (metis finite_range_updI map_add_upd map_of.simps(2)) qed auto lemma inj_on_map_add_dom [iff]: "inj_on (m ++ m') (dom m') = inj_on m' (dom m')" by (fastforce simp: map_add_def dom_def inj_on_def split: option.splits) lemma map_upds_fold_map_upd: "m(ks[\]vs) = foldl (\m (k, v). m(k \ v)) m (zip ks vs)" unfolding map_upds_def proof (rule sym, rule zip_obtain_same_length) fix ks :: "'a list" and vs :: "'b list" assume "length ks = length vs" then show "foldl (\m (k, v). m(k\v)) m (zip ks vs) = m ++ map_of (rev (zip ks vs))" by(induct arbitrary: m rule: list_induct2) simp_all qed lemma map_add_map_of_foldr: "m ++ map_of ps = foldr (\(k, v) m. 
m(k \ v)) ps m" by (induct ps) (auto simp: fun_eq_iff map_add_def) subsection \@{term [source] restrict_map}\ lemma restrict_map_to_empty [simp]: "m|`{} = empty" by (simp add: restrict_map_def) lemma restrict_map_insert: "f |` (insert a A) = (f |` A)(a := f a)" by (auto simp: restrict_map_def) lemma restrict_map_empty [simp]: "empty|`D = empty" by (simp add: restrict_map_def) lemma restrict_in [simp]: "x \ A \ (m|`A) x = m x" by (simp add: restrict_map_def) lemma restrict_out [simp]: "x \ A \ (m|`A) x = None" by (simp add: restrict_map_def) lemma ran_restrictD: "y \ ran (m|`A) \ \x\A. m x = Some y" by (auto simp: restrict_map_def ran_def split: if_split_asm) lemma dom_restrict [simp]: "dom (m|`A) = dom m \ A" by (auto simp: restrict_map_def dom_def split: if_split_asm) lemma restrict_upd_same [simp]: "m(x\y)|`(-{x}) = m|`(-{x})" by (rule ext) (auto simp: restrict_map_def) lemma restrict_restrict [simp]: "m|`A|`B = m|`(A\B)" by (rule ext) (auto simp: restrict_map_def) lemma restrict_fun_upd [simp]: "m(x := y)|`D = (if x \ D then (m|`(D-{x}))(x := y) else m|`D)" by (simp add: restrict_map_def fun_eq_iff) lemma fun_upd_None_restrict [simp]: "(m|`D)(x := None) = (if x \ D then m|`(D - {x}) else m|`D)" by (simp add: restrict_map_def fun_eq_iff) lemma fun_upd_restrict: "(m|`D)(x := y) = (m|`(D-{x}))(x := y)" by (simp add: restrict_map_def fun_eq_iff) lemma fun_upd_restrict_conv [simp]: "x \ D \ (m|`D)(x := y) = (m|`(D-{x}))(x := y)" by (rule fun_upd_restrict) lemma map_of_map_restrict: "map_of (map (\k. (k, f k)) ks) = (Some \ f) |` set ks" by (induct ks) (simp_all add: fun_eq_iff restrict_map_insert) lemma restrict_complement_singleton_eq: "f |` (- {x}) = f(x := None)" by auto subsection \@{term [source] map_upds}\ lemma map_upds_Nil1 [simp]: "m([] [\] bs) = m" by (simp add: map_upds_def) lemma map_upds_Nil2 [simp]: "m(as [\] []) = m" by (simp add:map_upds_def) lemma map_upds_Cons [simp]: "m(a#as [\] b#bs) = (m(a\b))(as[\]bs)" by (simp add:map_upds_def) lemma map_upds_append1 [simp]: "size xs < size ys \ m(xs@[x] [\] ys) = m(xs [\] ys)(x \ ys!size xs)" proof (induct xs arbitrary: ys m) case Nil then show ?case by (auto simp: neq_Nil_conv) next case (Cons a xs) then show ?case by (cases ys) auto qed lemma map_upds_list_update2_drop [simp]: "size xs \ i \ m(xs[\]ys[i:=y]) = m(xs[\]ys)" proof (induct xs arbitrary: m ys i) case Nil then show ?case by auto next case (Cons a xs) then show ?case by (cases ys) (use Cons in \auto split: nat.split\) qed text \Something weirdly sensitive about this proof, which needs only four lines in apply style\ lemma map_upd_upds_conv_if: "(f(x\y))(xs [\] ys) = (if x \ set(take (length ys) xs) then f(xs [\] ys) else (f(xs [\] ys))(x\y))" proof (induct xs arbitrary: x y ys f) case (Cons a xs) show ?case proof (cases ys) case (Cons z zs) then show ?thesis using Cons.hyps apply (auto split: if_split simp: fun_upd_twist) using Cons.hyps apply fastforce+ done qed auto qed auto lemma map_upds_twist [simp]: "a \ set as \ m(a\b)(as[\]bs) = m(as[\]bs)(a\b)" using set_take_subset by (fastforce simp add: map_upd_upds_conv_if) lemma map_upds_apply_nontin [simp]: "x \ set xs \ (f(xs[\]ys)) x = f x" proof (induct xs arbitrary: ys) case (Cons a xs) then show ?case by (cases ys) (auto simp: map_upd_upds_conv_if) qed auto lemma fun_upds_append_drop [simp]: "size xs = size ys \ m(xs@zs[\]ys) = m(xs[\]ys)" proof (induct xs arbitrary: ys) case (Cons a xs) then show ?case by (cases ys) (auto simp: map_upd_upds_conv_if) qed auto lemma fun_upds_append2_drop [simp]: "size xs = size ys \ 
m(xs[\]ys@zs) = m(xs[\]ys)" proof (induct xs arbitrary: ys) case (Cons a xs) then show ?case by (cases ys) (auto simp: map_upd_upds_conv_if) qed auto lemma restrict_map_upds[simp]: "\ length xs = length ys; set xs \ D \ \ m(xs [\] ys)|`D = (m|`(D - set xs))(xs [\] ys)" proof (induct xs arbitrary: m ys) case (Cons a xs) then show ?case proof (cases ys) case (Cons z zs) with Cons.hyps Cons.prems show ?thesis apply (simp add: insert_absorb flip: Diff_insert) apply (auto simp add: map_upd_upds_conv_if) done qed auto qed auto subsection \@{term [source] dom}\ lemma dom_eq_empty_conv [simp]: "dom f = {} \ f = empty" by (auto simp: dom_def) lemma domI: "m a = Some b \ a \ dom m" by (simp add: dom_def) (* declare domI [intro]? *) lemma domD: "a \ dom m \ \b. m a = Some b" by (cases "m a") (auto simp add: dom_def) lemma domIff [iff, simp del, code_unfold]: "a \ dom m \ m a \ None" by (simp add: dom_def) lemma dom_empty [simp]: "dom empty = {}" by (simp add: dom_def) lemma dom_fun_upd [simp]: "dom(f(x := y)) = (if y = None then dom f - {x} else insert x (dom f))" by (auto simp: dom_def) lemma dom_if: "dom (\x. if P x then f x else g x) = dom f \ {x. P x} \ dom g \ {x. \ P x}" by (auto split: if_splits) lemma dom_map_of_conv_image_fst: "dom (map_of xys) = fst ` set xys" by (induct xys) (auto simp add: dom_if) lemma dom_map_of_zip [simp]: "length xs = length ys \ dom (map_of (zip xs ys)) = set xs" by (induct rule: list_induct2) (auto simp: dom_if) lemma finite_dom_map_of: "finite (dom (map_of l))" by (induct l) (auto simp: dom_def insert_Collect [symmetric]) lemma dom_map_upds [simp]: "dom(m(xs[\]ys)) = set(take (length ys) xs) \ dom m" proof (induct xs arbitrary: ys) case (Cons a xs) then show ?case by (cases ys) (auto simp: map_upd_upds_conv_if) qed auto lemma dom_map_add [simp]: "dom (m ++ n) = dom n \ dom m" by (auto simp: dom_def) lemma dom_override_on [simp]: "dom (override_on f g A) = (dom f - {a. a \ A - dom g}) \ {a. a \ A \ dom g}" by (auto simp: dom_def override_on_def) lemma map_add_comm: "dom m1 \ dom m2 = {} \ m1 ++ m2 = m2 ++ m1" by (rule ext) (force simp: map_add_def dom_def split: option.split) lemma map_add_dom_app_simps: "m \ dom l2 \ (l1 ++ l2) m = l2 m" "m \ dom l1 \ (l1 ++ l2) m = l2 m" "m \ dom l2 \ (l1 ++ l2) m = l1 m" by (auto simp add: map_add_def split: option.split_asm) lemma dom_const [simp]: "dom (\x. Some (f x)) = UNIV" by auto (* Due to John Matthews - could be rephrased with dom *) lemma finite_map_freshness: "finite (dom (f :: 'a \ 'b)) \ \ finite (UNIV :: 'a set) \ \x. f x = None" by (bestsimp dest: ex_new_if_finite) lemma dom_minus: "f x = None \ dom f - insert x A = dom f - A" unfolding dom_def by simp lemma insert_dom: "f x = Some y \ insert x (dom f) = dom f" unfolding dom_def by auto lemma map_of_map_keys: "set xs = dom m \ map_of (map (\k. (k, the (m k))) xs) = m" by (rule ext) (auto simp add: map_of_map_restrict restrict_map_def) lemma map_of_eqI: assumes set_eq: "set (map fst xs) = set (map fst ys)" assumes map_eq: "\k\set (map fst xs). 
map_of xs k = map_of ys k" shows "map_of xs = map_of ys" proof (rule ext) fix k show "map_of xs k = map_of ys k" proof (cases "map_of xs k") case None then have "k \ set (map fst xs)" by (simp add: map_of_eq_None_iff) with set_eq have "k \ set (map fst ys)" by simp then have "map_of ys k = None" by (simp add: map_of_eq_None_iff) with None show ?thesis by simp next case (Some v) then have "k \ set (map fst xs)" by (auto simp add: dom_map_of_conv_image_fst [symmetric]) with map_eq show ?thesis by auto qed qed lemma map_of_eq_dom: assumes "map_of xs = map_of ys" shows "fst ` set xs = fst ` set ys" proof - from assms have "dom (map_of xs) = dom (map_of ys)" by simp then show ?thesis by (simp add: dom_map_of_conv_image_fst) qed lemma finite_set_of_finite_maps: assumes "finite A" "finite B" shows "finite {m. dom m = A \ ran m \ B}" (is "finite ?S") proof - let ?S' = "{m. \x. (x \ A \ m x \ Some ` B) \ (x \ A \ m x = None)}" have "?S = ?S'" proof show "?S \ ?S'" by (auto simp: dom_def ran_def image_def) show "?S' \ ?S" proof fix m assume "m \ ?S'" hence 1: "dom m = A" by force hence 2: "ran m \ B" using \m \ ?S'\ by (auto simp: dom_def ran_def) from 1 2 show "m \ ?S" by blast qed qed with assms show ?thesis by(simp add: finite_set_of_finite_funs) qed subsection \@{term [source] ran}\ lemma ranI: "m a = Some b \ b \ ran m" by (auto simp: ran_def) (* declare ranI [intro]? *) lemma ran_empty [simp]: "ran empty = {}" by (auto simp: ran_def) lemma ran_map_upd [simp]: "m a = None \ ran(m(a\b)) = insert b (ran m)" unfolding ran_def by force lemma fun_upd_None_if_notin_dom[simp]: "k \ dom m \ m(k := None) = m" by auto +lemma ran_map_upd_Some: + "\ m x = Some y; inj_on m (dom m); z \ ran m \ \ ran(m(x := Some z)) = ran m - {y} \ {z}" +by(force simp add: ran_def domI inj_onD) +
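(* Editor's note -- an illustrative sketch, not part of the change itself: the new
   ran_map_upd_Some above computes the range after overwriting an existing key,
   provided the map is injective on its domain and the new value z is fresh.
   For instance, for m = [1 \<mapsto> ''a'', 2 \<mapsto> ''b''] :: nat \<rightharpoonup> string, updating
   key 1 with the fresh value ''c'' gives
     ran (m(1 := Some ''c'')) = {''b'', ''c''} = ran m - {''a''} \<union> {''c''}.
   Injectivity is essential: if ''a'' were also the value of some other key,
   subtracting {''a''} from the range would be wrong. *)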
lemma ran_map_add: assumes "dom m1 \ dom m2 = {}" shows "ran (m1 ++ m2) = ran m1 \ ran m2" proof show "ran (m1 ++ m2) \ ran m1 \ ran m2" unfolding ran_def by auto next show "ran m1 \ ran m2 \ ran (m1 ++ m2)" proof - have "(m1 ++ m2) x = Some y" if "m1 x = Some y" for x y using assms map_add_comm that by fastforce moreover have "(m1 ++ m2) x = Some y" if "m2 x = Some y" for x y using assms that by auto ultimately show ?thesis unfolding ran_def by blast qed qed lemma finite_ran: assumes "finite (dom p)" shows "finite (ran p)" proof - have "ran p = (\x. the (p x)) ` dom p" unfolding ran_def by force from this \finite (dom p)\ show ?thesis by auto qed lemma ran_distinct: assumes dist: "distinct (map fst al)" shows "ran (map_of al) = snd ` set al" using assms proof (induct al) case Nil then show ?case by simp next case (Cons kv al) then have "ran (map_of al) = snd ` set al" by simp moreover from Cons.prems have "map_of al (fst kv) = None" by (simp add: map_of_eq_None_iff) ultimately show ?case by (simp only: map_of.simps ran_map_upd) simp qed lemma ran_map_of_zip: assumes "length xs = length ys" "distinct xs" shows "ran (map_of (zip xs ys)) = set ys" using assms by (simp add: ran_distinct set_map[symmetric]) lemma ran_map_option: "ran (\x. map_option f (m x)) = f ` ran m" by (auto simp add: ran_def) subsection \@{term [source] graph}\ lemma graph_empty[simp]: "graph empty = {}" unfolding graph_def by simp lemma in_graphI: "m k = Some v \ (k, v) \ graph m" unfolding graph_def by blast lemma in_graphD: "(k, v) \ graph m \ m k = Some v" unfolding graph_def by blast lemma graph_map_upd[simp]: "graph (m(k \ v)) = insert (k, v) (graph (m(k := None)))" unfolding graph_def by (auto split: if_splits) lemma graph_fun_upd_None: "graph (m(k := None)) = {e \ graph m. fst e \ k}" unfolding graph_def by (auto split: if_splits) lemma graph_restrictD: assumes "(k, v) \ graph (m |` A)" shows "k \ A" and "m k = Some v" using assms unfolding graph_def by (auto simp: restrict_map_def split: if_splits) lemma graph_map_comp[simp]: "graph (m1 \\<^sub>m m2) = graph m2 O graph m1" unfolding graph_def by (auto simp: map_comp_Some_iff relcomp_unfold) lemma graph_map_add: "dom m1 \ dom m2 = {} \ graph (m1 ++ m2) = graph m1 \ graph m2" unfolding graph_def using map_add_comm by force lemma graph_eq_to_snd_dom: "graph m = (\x. (x, the (m x))) ` dom m" unfolding graph_def dom_def by force lemma fst_graph_eq_dom: "fst ` graph m = dom m" unfolding graph_eq_to_snd_dom by force lemma graph_domD: "x \ graph m \ fst x \ dom m" using fst_graph_eq_dom by (metis imageI) lemma snd_graph_ran: "snd ` graph m = ran m" unfolding graph_def ran_def by force lemma graph_ranD: "x \ graph m \ snd x \ ran m" using snd_graph_ran by (metis imageI) lemma finite_graph_map_of: "finite (graph (map_of al))" unfolding graph_eq_to_snd_dom finite_dom_map_of using finite_dom_map_of by blast lemma graph_map_of_if_distinct_dom: "distinct (map fst al) \ graph (map_of al) = set al" unfolding graph_def by auto lemma finite_graph_iff_finite_dom[simp]: "finite (graph m) = finite (dom m)" by (metis graph_eq_to_snd_dom finite_imageI fst_graph_eq_dom) lemma inj_on_fst_graph: "inj_on fst (graph m)" unfolding graph_def inj_on_def by force subsection \\map_le\\ lemma map_le_empty [simp]: "empty \\<^sub>m g" by (simp add: map_le_def) lemma upd_None_map_le [simp]: "f(x := None) \\<^sub>m f" by (force simp add: map_le_def) lemma map_le_upd[simp]: "f \\<^sub>m g ==> f(a := b) \\<^sub>m g(a := b)" by (fastforce simp add: map_le_def) lemma map_le_imp_upd_le [simp]: "m1 \\<^sub>m m2 \ m1(x := None) \\<^sub>m m2(x \ y)" by (force simp add: map_le_def) lemma map_le_upds [simp]: "f \\<^sub>m g \ f(as [\] bs) \\<^sub>m g(as [\] bs)" proof (induct as arbitrary: f g bs) case (Cons a as) then show ?case by (cases bs) (use Cons in auto) qed auto lemma map_le_implies_dom_le: "(f \\<^sub>m g) \ (dom f \ dom g)" by (fastforce simp add: map_le_def dom_def) lemma map_le_refl [simp]: "f \\<^sub>m f" by (simp add: map_le_def) lemma map_le_trans[trans]: "\ m1 \\<^sub>m m2; m2 \\<^sub>m m3\ \ m1 \\<^sub>m m3" by (auto simp add: map_le_def dom_def) lemma map_le_antisym: "\ f \\<^sub>m g; g \\<^sub>m f \ \ f = g" unfolding map_le_def by (metis ext domIff) lemma map_le_map_add [simp]: "f \\<^sub>m g ++ f" by (fastforce simp: map_le_def) lemma map_le_iff_map_add_commute: "f \\<^sub>m f ++ g \ f ++ g = g ++ f" by (fastforce simp: map_add_def map_le_def fun_eq_iff split: option.splits) lemma map_add_le_mapE: "f ++ g \\<^sub>m h \ g \\<^sub>m h" by (fastforce simp: map_le_def map_add_def dom_def) lemma map_add_le_mapI: "\ f \\<^sub>m h; g \\<^sub>m h \ \ f ++ g \\<^sub>m h" by (auto simp: map_le_def map_add_def dom_def split: option.splits) lemma map_add_subsumed1: "f \\<^sub>m g \ f++g = g" by (simp add: map_add_le_mapI
map_le_antisym) lemma map_add_subsumed2: "f \\<^sub>m g \ g++f = g" by (metis map_add_subsumed1 map_le_iff_map_add_commute) lemma dom_eq_singleton_conv: "dom f = {x} \ (\v. f = [x \ v])" (is "?lhs \ ?rhs") proof assume ?rhs then show ?lhs by (auto split: if_split_asm) next assume ?lhs then obtain v where v: "f x = Some v" by auto show ?rhs proof show "f = [x \ v]" proof (rule map_le_antisym) show "[x \ v] \\<^sub>m f" using v by (auto simp add: map_le_def) show "f \\<^sub>m [x \ v]" using \dom f = {x}\ \f x = Some v\ by (auto simp add: map_le_def) qed qed qed lemma map_add_eq_empty_iff[simp]: "(f++g = empty) \ f = empty \ g = empty" by (metis map_add_None) lemma empty_eq_map_add_iff[simp]: "(empty = f++g) \ f = empty \ g = empty" by(subst map_add_eq_empty_iff[symmetric])(rule eq_commute) subsection \Various\ lemma set_map_of_compr: assumes distinct: "distinct (map fst xs)" shows "set xs = {(k, v). map_of xs k = Some v}" using assms proof (induct xs) case Nil then show ?case by simp next case (Cons x xs) obtain k v where "x = (k, v)" by (cases x) blast with Cons.prems have "k \ dom (map_of xs)" by (simp add: dom_map_of_conv_image_fst) then have *: "insert (k, v) {(k, v). map_of xs k = Some v} = {(k', v'). (map_of xs(k \ v)) k' = Some v'}" by (auto split: if_splits) from Cons have "set xs = {(k, v). map_of xs k = Some v}" by simp with * \x = (k, v)\ show ?case by simp qed lemma eq_key_imp_eq_value: "v1 = v2" if "distinct (map fst xs)" "(k, v1) \ set xs" "(k, v2) \ set xs" proof - from that have "inj_on fst (set xs)" by (simp add: distinct_map) moreover have "fst (k, v1) = fst (k, v2)" by simp ultimately have "(k, v1) = (k, v2)" by (rule inj_onD) (fact that)+ then show ?thesis by simp qed lemma map_of_inject_set: assumes distinct: "distinct (map fst xs)" "distinct (map fst ys)" shows "map_of xs = map_of ys \ set xs = set ys" (is "?lhs \ ?rhs") proof assume ?lhs moreover from \distinct (map fst xs)\ have "set xs = {(k, v). map_of xs k = Some v}" by (rule set_map_of_compr) moreover from \distinct (map fst ys)\ have "set ys = {(k, v). map_of ys k = Some v}" by (rule set_map_of_compr) ultimately show ?rhs by simp next assume ?rhs show ?lhs proof fix k show "map_of xs k = map_of ys k" proof (cases "map_of xs k") case None with \?rhs\ have "map_of ys k = None" by (simp add: map_of_eq_None_iff) with None show ?thesis by simp next case (Some v) with distinct \?rhs\ have "map_of ys k = Some v" by simp with Some show ?thesis by simp qed qed qed lemma finite_Map_induct[consumes 1, case_names empty update]: assumes "finite (dom m)" assumes "P Map.empty" assumes "\k v m. finite (dom m) \ k \ dom m \ P m \ P (m(k \ v))" shows "P m" using assms(1) proof(induction "dom m" arbitrary: m rule: finite_induct) case empty then show ?case using assms(2) unfolding dom_def by simp next case (insert x F) then have "finite (dom (m(x:=None)))" "x \ dom (m(x:=None))" "P (m(x:=None))" by (metis Diff_insert_absorb dom_fun_upd)+ with assms(3)[OF this] show ?case by (metis fun_upd_triv fun_upd_upd option.exhaust) qed hide_const (open) Map.empty Map.graph end diff --git a/src/HOL/ROOT b/src/HOL/ROOT --- a/src/HOL/ROOT +++ b/src/HOL/ROOT @@ -1,1210 +1,1210 @@ chapter HOL session HOL (main) = Pure + description " Classical Higher-order Logic. " options [strict_facts] sessions Tools theories Main (global) Complex_Main (global) document_theories Tools.Code_Generator document_files "root.bib" "root.tex" session "HOL-Examples" in Examples = HOL + description " Notable Examples for Isabelle/HOL. 
" sessions "HOL-Computational_Algebra" theories Adhoc_Overloading_Examples Ackermann Cantor Coherent Commands Drinker Functions Groebner_Examples Iff_Oracle Induction_Schema Knaster_Tarski "ML" Peirce Records + Rewrite_Examples Seq Sqrt document_files "root.bib" "root.tex" session "HOL-Proofs" (timing) in Proofs = Pure + description " HOL-Main with explicit proof terms. " options [quick_and_dirty = false, record_proofs = 2, parallel_limit = 500] sessions "HOL-Library" theories "HOL-Library.Realizers" session "HOL-Library" (main timing) in Library = HOL + description " Classical Higher-order Logic -- batteries included. " theories Library (*conflicting type class instantiations and dependent applications*) Finite_Lattice List_Lexorder List_Lenlexorder Prefix_Order Product_Lexorder Product_Order Subseq_Order (*conflicting syntax*) Datatype_Records (*data refinements and dependent applications*) AList_Mapping Code_Binary_Nat Code_Prolog Code_Real_Approx_By_Float Code_Target_Numeral Code_Target_Numeral_Float Complex_Order DAList DAList_Multiset RBT_Mapping RBT_Set (*printing modifications*) OptionalSugar (*prototypic tools*) Predicate_Compile_Quickcheck (*legacy tools*) Old_Datatype Old_Recdef Realizers Refute document_files "root.bib" "root.tex" session "HOL-Analysis" (main timing) in Analysis = HOL + options [document_tags = "theorem%important,corollary%important,proposition%important,class%important,instantiation%important,subsubsection%unimportant,%unimportant", document_variants = "document:manual=-proof,-ML,-unimportant"] sessions "HOL-Library" "HOL-Combinatorics" "HOL-Computational_Algebra" theories Analysis document_files "root.tex" "root.bib" session "HOL-Complex_Analysis" (main timing) in Complex_Analysis = "HOL-Analysis" + options [document_tags = "theorem%important,corollary%important,proposition%important,class%important,instantiation%important,subsubsection%unimportant,%unimportant", document_variants = "document:manual=-proof,-ML,-unimportant"] theories Complex_Analysis document_files "root.tex" "root.bib" session "HOL-Analysis-ex" in "Analysis/ex" = "HOL-Analysis" + theories Approximations Metric_Arith_Examples session "HOL-Homology" (timing) in Homology = "HOL-Analysis" + options [document_tags = "theorem%important,corollary%important,proposition%important,class%important,instantiation%important,subsubsection%unimportant,%unimportant", document_variants = "document:manual=-proof,-ML,-unimportant"] sessions "HOL-Algebra" theories Homology document_files "root.tex" session "HOL-Combinatorics" (main timing) in "Combinatorics" = "HOL" + sessions "HOL-Library" theories Combinatorics document_files "root.tex" session "HOL-Computational_Algebra" (main timing) in "Computational_Algebra" = "HOL-Library" + theories Computational_Algebra (*conflicting type class instantiations and dependent applications*) Field_as_Ring session "HOL-Real_Asymp" in Real_Asymp = HOL + sessions "HOL-Decision_Procs" theories Real_Asymp Real_Asymp_Approx Real_Asymp_Examples session "HOL-Real_Asymp-Manual" in "Real_Asymp/Manual" = "HOL-Real_Asymp" + theories Real_Asymp_Doc document_files (in "~~/src/Doc") "iman.sty" "extra.sty" "isar.sty" document_files "root.tex" "style.sty" session "HOL-Hahn_Banach" in Hahn_Banach = HOL + description " Author: Gertrud Bauer, TU Munich The Hahn-Banach theorem for real vector spaces. This is the proof of the Hahn-Banach theorem for real vectorspaces, following H. Heuser, Funktionalanalysis, p. 228 -232. 
The Hahn-Banach theorem is one of the fundamental theorems of functional analysis. It is a conclusion of Zorn's lemma. Two different formulations of the theorem are presented, one for general real vectorspaces and one for its application to normed vectorspaces. The theorem says that every continuous linearform defined on arbitrary subspaces (not only one-dimensional subspaces) can be extended to a continuous linearform on the whole vectorspace. " sessions "HOL-Analysis" theories Hahn_Banach document_files "root.bib" "root.tex" session "HOL-Induct" in Induct = HOL + description " Examples of (Co)Inductive Definitions. Comb proves the Church-Rosser theorem for combinators (see http://www.cl.cam.ac.uk/ftp/papers/reports/TR396-lcp-generic-automatic-proof-tools.ps.gz). Mutil is the famous Mutilated Chess Board problem (see http://www.cl.cam.ac.uk/ftp/papers/reports/TR394-lcp-mutilated-chess-board.dvi.gz). PropLog proves the completeness of a formalization of propositional logic (see http://www.cl.cam.ac.uk/Research/Reports/TR312-lcp-set-II.ps.gz). Exp demonstrates the use of iterated inductive definitions to reason about mutually recursive relations. " sessions "HOL-Library" theories [quick_and_dirty] Common_Patterns theories Nested_Datatype QuoDataType QuoNestedDataType Term SList ABexp Infinitely_Branching_Tree Ordinals Sigma_Algebra Comb PropLog Com document_files "root.tex" session "HOL-IMP" (timing) in IMP = "HOL-Library" + options [document_variants = document] theories BExp ASM Finite_Reachable Denotational Compiler2 Poly_Types Sec_Typing Sec_TypingT Def_Init_Big Def_Init_Small Fold Live Live_True Hoare_Examples Hoare_Sound_Complete VCG Hoare_Total VCG_Total_EX VCG_Total_EX2 Collecting1 Collecting_Examples Abs_Int_Tests Abs_Int1_parity Abs_Int1_const Abs_Int3 Procs_Dyn_Vars_Dyn Procs_Stat_Vars_Dyn Procs_Stat_Vars_Stat C_like OO document_files "root.bib" "root.tex" session "HOL-IMPP" in IMPP = HOL + description \ Author: David von Oheimb Copyright 1999 TUM IMPP -- An imperative language with procedures. This is an extension of IMP with local variables and mutually recursive procedures. For documentation see "Hoare Logic for Mutual Recursion and Local Variables" (https://isabelle.in.tum.de/Bali/papers/FSTTCS99.html). \ theories EvenOdd session "HOL-Data_Structures" (timing) in Data_Structures = HOL + options [document_variants = document] sessions "HOL-Number_Theory" theories [document = false] Less_False theories Sorting Balance Tree_Map Interval_Tree AVL_Map AVL_Bal_Set AVL_Bal2_Set Height_Balanced_Tree RBT_Set2 RBT_Map Tree23_Map Tree23_of_List Tree234_Map Brother12_Map AA_Map Set2_Join_RBT Array_Braun Trie_Fun Trie_Map Tries_Binary Queue_2Lists Heaps Leftist_Heap Binomial_Heap Selection document_files "root.tex" "root.bib" session "HOL-Import" in Import = HOL + theories HOL_Light_Maps theories [condition = HOL_LIGHT_BUNDLE] HOL_Light_Import session "HOL-Number_Theory" (main timing) in Number_Theory = "HOL-Computational_Algebra" + description " Fundamental Theorem of Arithmetic, Chinese Remainder Theorem, Fermat/Euler Theorem, Wilson's Theorem, some lemmas for Quadratic Reciprocity. " sessions "HOL-Algebra" theories Number_Theory document_files "root.tex" session "HOL-Hoare" in Hoare = HOL + description " Verification of imperative programs (verification conditions are generated automatically from pre/post conditions and loop invariants).
" theories Examples ExamplesAbort ExamplesTC Pointers0 Pointer_Examples Pointer_ExamplesAbort SchorrWaite Separation document_files "root.bib" "root.tex" session "HOL-Hoare_Parallel" (timing) in Hoare_Parallel = HOL + description " Verification of shared-variable imperative programs a la Owicki-Gries. (verification conditions are generated automatically). " theories Hoare_Parallel document_files "root.bib" "root.tex" session "HOL-Codegenerator_Test" in Codegenerator_Test = "HOL-Library" + sessions "HOL-Number_Theory" "HOL-Data_Structures" "HOL-Examples" theories Generate Generate_Binary_Nat Generate_Target_Nat Generate_Efficient_Datastructures Code_Lazy_Test Code_Test_PolyML Code_Test_Scala theories [condition = ISABELLE_GHC] Code_Test_GHC theories [condition = ISABELLE_MLTON] Code_Test_MLton theories [condition = ISABELLE_OCAMLFIND] Code_Test_OCaml theories [condition = ISABELLE_SMLNJ] Code_Test_SMLNJ session "HOL-Metis_Examples" (timing) in Metis_Examples = "HOL-Library" + description " Author: Lawrence C Paulson, Cambridge University Computer Laboratory Author: Jasmin Blanchette, TU Muenchen Testing Metis and Sledgehammer. " sessions "HOL-Decision_Procs" theories Abstraction Big_O Binary_Tree Clausification Message Proxies Tarski Trans_Closure Sets session "HOL-Nitpick_Examples" in Nitpick_Examples = HOL + description " Author: Jasmin Blanchette, TU Muenchen Copyright 2009 " options [kodkod_scala] sessions "HOL-Library" theories [quick_and_dirty] Nitpick_Examples session "HOL-Algebra" (main timing) in Algebra = "HOL-Computational_Algebra" + description " Author: Clemens Ballarin, started 24 September 1999, and many others The Isabelle Algebraic Library. " sessions "HOL-Cardinals" "HOL-Combinatorics" theories (* Orders and Lattices *) Galois_Connection (* Knaster-Tarski theorem and Galois connections *) (* Groups *) FiniteProduct (* Product operator for commutative groups *) Sylow (* Sylow's theorem *) Bij (* Automorphism Groups *) Multiplicative_Group Zassenhaus (* The Zassenhaus lemma *) (* Rings *) Divisibility (* Rings *) IntRing (* Ideals and residue classes *) UnivPoly (* Polynomials *) (* Main theory *) Algebra document_files "root.bib" "root.tex" session "HOL-Auth" (timing) in Auth = HOL + description " A new approach to verifying authentication protocols. " sessions "HOL-Library" directories "Smartcard" "Guard" theories Auth_Shared Auth_Public "Smartcard/Auth_Smartcard" "Guard/Auth_Guard_Shared" "Guard/Auth_Guard_Public" document_files "root.tex" session "HOL-UNITY" (timing) in UNITY = "HOL-Auth" + description " Author: Lawrence C Paulson, Cambridge University Computer Laboratory Copyright 1998 University of Cambridge Verifying security protocols using Chandy and Misra's UNITY formalism. 
" directories "Simple" "Comp" theories (*Basic meta-theory*) UNITY_Main (*Simple examples: no composition*) "Simple/Deadlock" "Simple/Common" "Simple/Network" "Simple/Token" "Simple/Channel" "Simple/Lift" "Simple/Mutex" "Simple/Reach" "Simple/Reachability" (*Verifying security protocols using UNITY*) "Simple/NSP_Bad" (*Example of composition*) "Comp/Handshake" (*Universal properties examples*) "Comp/Counter" "Comp/Counterc" "Comp/Priority" "Comp/TimerArray" "Comp/Progress" "Comp/Alloc" "Comp/AllocImpl" "Comp/Client" (*obsolete*) ELT document_files "root.tex" session "HOL-Unix" in Unix = HOL + options [print_mode = "no_brackets,no_type_brackets"] sessions "HOL-Library" theories Unix document_files "root.bib" "root.tex" session "HOL-ZF" in ZF = HOL + sessions "HOL-Library" theories MainZF Games document_files "root.tex" session "HOL-Imperative_HOL" (timing) in Imperative_HOL = HOL + options [print_mode = "iff,no_brackets"] sessions "HOL-Library" directories "ex" theories Imperative_HOL_ex document_files "root.bib" "root.tex" session "HOL-Decision_Procs" (timing) in Decision_Procs = "HOL-Algebra" + description " Various decision procedures, typically involving reflection. " directories "ex" theories Decision_Procs session "HOL-Proofs-ex" in "Proofs/ex" = "HOL-Proofs" + sessions "HOL-Examples" theories Hilbert_Classical Proof_Terms XML_Data session "HOL-Proofs-Extraction" (timing) in "Proofs/Extraction" = "HOL-Proofs" + description " Examples for program extraction in Higher-Order Logic. " options [quick_and_dirty = false] sessions "HOL-Computational_Algebra" theories Greatest_Common_Divisor Warshall Higman_Extraction Pigeonhole Euclid document_files "root.bib" "root.tex" session "HOL-Proofs-Lambda" (timing) in "Proofs/Lambda" = "HOL-Proofs" + description \ Lambda Calculus in de Bruijn's Notation. This session defines lambda-calculus terms with de Bruijn indixes and proves confluence of beta, eta and beta+eta. The paper "More Church-Rosser Proofs (in Isabelle/HOL)" describes the whole theory (see http://www.in.tum.de/~nipkow/pubs/jar2001.html). \ options [print_mode = "no_brackets", quick_and_dirty = false] sessions "HOL-Library" theories Eta StrongNorm Standardization WeakNorm document_files "root.bib" "root.tex" session "HOL-Prolog" in Prolog = HOL + description " Author: David von Oheimb (based on a lecture on Lambda Prolog by Nadathur) A bare-bones implementation of Lambda-Prolog. This is a simple exploratory implementation of Lambda-Prolog in HOL, including some minimal examples (in Test.thy) and a more typical example of a little functional language and its type system. " theories Test Type session "HOL-MicroJava" (timing) in MicroJava = HOL + description " Formalization of a fragment of Java, together with a corresponding virtual machine and a specification of its bytecode verifier and a lightweight bytecode verifier, including proofs of type-safety. " sessions "HOL-Library" "HOL-Eisbach" directories "BV" "Comp" "DFA" "J" "JVM" theories MicroJava document_files "introduction.tex" "root.bib" "root.tex" session "HOL-NanoJava" in NanoJava = HOL + description " Hoare Logic for a tiny fragment of Java. " theories Example document_files "root.bib" "root.tex" session "HOL-Bali" (timing) in Bali = HOL + sessions "HOL-Library" theories AxExample AxSound AxCompl Trans TypeSafe document_files "root.tex" session "HOL-IOA" in IOA = HOL + description \ Author: Tobias Nipkow and Konrad Slind and Olaf Müller Copyright 1994--1996 TU Muenchen The meta-theory of I/O-Automata in HOL. 
This formalization has been significantly changed and extended, see HOLCF/IOA. There are also the proofs of two communication protocols which formerly have been here. @inproceedings{Nipkow-Slind-IOA, author={Tobias Nipkow and Konrad Slind}, title={{I/O} Automata in {Isabelle/HOL}}, booktitle={Proc.\ TYPES Workshop 1994}, publisher=Springer, series=LNCS, note={To appear}} ftp://ftp.informatik.tu-muenchen.de/local/lehrstuhl/nipkow/ioa.ps.gz and @inproceedings{Mueller-Nipkow, author={Olaf M\"uller and Tobias Nipkow}, title={Combining Model Checking and Deduction for {I/O}-Automata}, booktitle={Proc.\ TACAS Workshop}, organization={Aarhus University, BRICS report}, year=1995} ftp://ftp.informatik.tu-muenchen.de/local/lehrstuhl/nipkow/tacas.dvi.gz \ theories Solve session "HOL-Lattice" in Lattice = HOL + description " Author: Markus Wenzel, TU Muenchen Basic theory of lattices and orders. " theories CompleteLattice document_files "root.tex" session "HOL-ex" (timing) in ex = "HOL-Number_Theory" + description " Miscellaneous examples and experiments for Isabelle/HOL. " theories Antiquote Argo_Examples Arith_Examples Ballot BinEx Birthday_Paradox Bubblesort CTL Cartouche_Examples Case_Product Chinese Classical Code_Binary_Nat_examples Code_Lazy_Demo Code_Timing Coercion_Examples Computations Conditional_Parametricity_Examples Cubic_Quartic Datatype_Record_Examples Dedekind_Real Erdoes_Szekeres Eval_Examples Executable_Relation Execute_Choice Function_Growth Gauge_Integration HarmonicSeries Hebrew Hex_Bin_Examples IArray_Examples Intuitionistic Join_Theory Lagrange List_to_Set_Comprehension_Examples LocaleTest2 MergeSort MonoidGroup Multiquote NatSum Normalization_by_Evaluation PER Parallel_Example Peano_Axioms Perm_Fragments PresburgerEx Primrec Pythagoras Quicksort Radix_Sort Reflection_Examples Refute_Examples Residue_Ring - Rewrite_Examples SOS SOS_Cert Serbian Set_Comprehension_Pointfree_Examples Set_Theory Simproc_Tests Simps_Case_Conv_Examples Sketch_and_Explore Sorting_Algorithms_Examples Specifications_with_bundle_mixins Sqrt_Script Sudoku Sum_of_Powers Tarski Termination ThreeDivides Transfer_Debug Transfer_Int_Nat Transitive_Closure_Table_Ex Tree23 Triangular_Numbers Unification While_Combinator_Example veriT_Preprocessing theories [skip_proofs = false] SAT_Examples Meson_Test session "HOL-Isar_Examples" in Isar_Examples = "HOL-Computational_Algebra" + description " Miscellaneous Isabelle/Isar examples. " options [quick_and_dirty] sessions "HOL-Hoare" theories Structured_Statements Basic_Logic Expr_Compiler Fibonacci Group Group_Context Group_Notepad Hoare_Ex Mutilated_Checkerboard Puzzle Summation document_files "root.bib" "root.tex" session "HOL-Eisbach" in Eisbach = HOL + description \ The Eisbach proof method language and "match" method. \ sessions FOL "HOL-Analysis" theories Eisbach Tests Examples Examples_FOL Example_Metric session "HOL-SET_Protocol" (timing) in SET_Protocol = HOL + description " Verification of the SET Protocol. " sessions "HOL-Library" theories SET_Protocol document_files "root.tex" session "HOL-Matrix_LP" in Matrix_LP = HOL + description " Two-dimensional matrices and linear programming. " sessions "HOL-Library" directories "Compute_Oracle" theories Cplex document_files "root.tex" session "HOL-TLA" in TLA = HOL + description " Lamport's Temporal Logic of Actions. 
" theories TLA session "HOL-TLA-Inc" in "TLA/Inc" = "HOL-TLA" + theories Inc session "HOL-TLA-Buffer" in "TLA/Buffer" = "HOL-TLA" + theories DBuffer session "HOL-TLA-Memory" in "TLA/Memory" = "HOL-TLA" + theories MemoryImplementation session "HOL-TPTP" in TPTP = HOL + description " Author: Jasmin Blanchette, TU Muenchen Author: Nik Sultana, University of Cambridge Copyright 2011 TPTP-related extensions. " sessions "HOL-Library" theories ATP_Theory_Export MaSh_Eval TPTP_Interpret THF_Arith TPTP_Proof_Reconstruction theories ATP_Problem_Import session "HOL-Probability" (main timing) in "Probability" = "HOL-Analysis" + sessions "HOL-Combinatorics" theories Probability document_files "root.tex" session "HOL-Probability-ex" (timing) in "Probability/ex" = "HOL-Probability" + theories Dining_Cryptographers Koepf_Duermuth_Countermeasure Measure_Not_CCC session "HOL-Nominal" in Nominal = HOL + sessions "HOL-Library" theories Nominal session "HOL-Nominal-Examples" (timing) in "Nominal/Examples" = "HOL-Nominal" + theories Class3 CK_Machine Compile Contexts Crary CR_Takahashi CR Fsub Height Lambda_mu Lam_Funs LocalWeakening Pattern SN SOS Standardization Support Type_Preservation Weakening W theories [quick_and_dirty] VC_Condition session "HOL-Cardinals" (timing) in Cardinals = HOL + description " Ordinals and Cardinals, Full Theories. " theories Cardinals Bounded_Set document_files "intro.tex" "root.tex" "root.bib" session "HOL-Datatype_Examples" (timing) in Datatype_Examples = "HOL-Library" + description " (Co)datatype Examples. " directories "Derivation_Trees" theories Compat Lambda_Term Process TreeFsetI "Derivation_Trees/Gram_Lang" "Derivation_Trees/Parallel_Composition" Koenig Lift_BNF Milner_Tofte Stream_Processor Cyclic_List Free_Idempotent_Monoid Regex_ACI Regex_ACIDZ TLList FAE_Sequence Misc_Codatatype Misc_Datatype Misc_Primcorec Misc_Primrec Datatype_Simproc_Tests session "HOL-Corec_Examples" (timing) in Corec_Examples = "HOL-Library" + description " Corecursion Examples. " directories "Tests" theories LFilter Paper_Examples Stream_Processor "Tests/Simple_Nesting" "Tests/Iterate_GPV" theories [quick_and_dirty] "Tests/GPV_Bare_Bones" "Tests/Merge_D" "Tests/Merge_Poly" "Tests/Misc_Mono" "Tests/Misc_Poly" "Tests/Small_Concrete" "Tests/Stream_Friends" "Tests/TLList_Friends" "Tests/Type_Class" session "HOL-Statespace" in Statespace = HOL + theories [skip_proofs = false] StateSpaceEx document_files "root.tex" session "HOL-Nonstandard_Analysis" (timing) in Nonstandard_Analysis = "HOL-Computational_Algebra" + description " Nonstandard analysis. " theories Nonstandard_Analysis document_files "root.tex" session "HOL-Nonstandard_Analysis-Examples" (timing) in "Nonstandard_Analysis/Examples" = "HOL-Nonstandard_Analysis" + theories NSPrimes session "HOL-Mirabelle-ex" in "Tools/Mirabelle" = HOL + description "Some arbitrary small test case for Mirabelle." 
options [timeout = 60, mirabelle_theories = "HOL-Analysis.Inner_Product", mirabelle_actions = "arith"] sessions "HOL-Analysis" theories "HOL-Analysis.Inner_Product" session "HOL-SMT_Examples" (timing) in SMT_Examples = HOL + options [quick_and_dirty] sessions "HOL-Library" theories Boogie SMT_Examples SMT_Word_Examples SMT_Examples_Verit SMT_Tests_Verit theories [condition = Z3_INSTALLED] SMT_Tests session "HOL-SPARK" in "SPARK" = HOL + sessions "HOL-Library" theories SPARK session "HOL-SPARK-Examples" in "SPARK/Examples" = "HOL-SPARK" + directories "Gcd" "Liseq" "RIPEMD-160" "Sqrt" theories "Gcd/Greatest_Common_Divisor" "Liseq/Longest_Increasing_Subsequence" "RIPEMD-160/F" "RIPEMD-160/Hash" "RIPEMD-160/K_L" "RIPEMD-160/K_R" "RIPEMD-160/R_L" "RIPEMD-160/Round" "RIPEMD-160/R_R" "RIPEMD-160/S_L" "RIPEMD-160/S_R" "Sqrt/Sqrt" export_files (in ".") "*:**.prv" session "HOL-SPARK-Manual" in "SPARK/Manual" = "HOL-SPARK" + options [show_question_marks = false] sessions "HOL-Library" "HOL-SPARK-Examples" theories Example_Verification VC_Principles Reference Complex_Types document_theories "HOL-SPARK-Examples.Greatest_Common_Divisor" document_files "complex_types.ads" "complex_types_app.adb" "complex_types_app.ads" "Gcd.adb" "Gcd.ads" "intro.tex" "loop_invariant.adb" "loop_invariant.ads" "root.bib" "root.tex" "Simple_Gcd.adb" "Simple_Gcd.ads" session "HOL-Mutabelle" in Mutabelle = HOL + sessions "HOL-Library" theories MutabelleExtra session "HOL-Quickcheck_Examples" (timing) in Quickcheck_Examples = HOL + sessions "HOL-Library" theories Quickcheck_Examples Quickcheck_Lattice_Examples Completeness Quickcheck_Interfaces Quickcheck_Nesting_Example theories [condition = ISABELLE_GHC] Hotel_Example Quickcheck_Narrowing_Examples session "HOL-Quotient_Examples" (timing) in Quotient_Examples = "HOL-Algebra" + description " Author: Cezary Kaliszyk and Christian Urban " theories DList Quotient_FSet Quotient_Int Quotient_Message Lift_FSet Lift_Set Lift_Fun Quotient_Rat Lift_DList Int_Pow Lifting_Code_Dt_Test session "HOL-Predicate_Compile_Examples" (timing) in Predicate_Compile_Examples = HOL + sessions "HOL-Library" theories Examples Predicate_Compile_Tests Predicate_Compile_Quickcheck_Examples Specialisation_Examples IMP_1 IMP_2 (* FIXME since 21-Jul-2011 Hotel_Example_Small_Generator *) IMP_3 IMP_4 theories [condition = ISABELLE_SWIPL] Code_Prolog_Examples Context_Free_Grammar_Example Hotel_Example_Prolog Lambda_Example List_Examples theories [condition = ISABELLE_SWIPL, quick_and_dirty] Reg_Exp_Example session "HOL-Types_To_Sets" in Types_To_Sets = HOL + description " Experimental extension of Higher-Order Logic to allow translation of types to sets. " directories "Examples" theories Types_To_Sets "Examples/Prerequisites" "Examples/Finite" "Examples/T2_Spaces" "Examples/Unoverload_Def" "Examples/Linear_Algebra_On" session HOLCF (main timing) in HOLCF = HOL + description " Author: Franz Regensburger Author: Brian Huffman HOLCF -- a semantic extension of HOL by the LCF logic. " sessions "HOL-Library" theories HOLCF (global) document_files "root.tex" session "HOLCF-Tutorial" in "HOLCF/Tutorial" = HOLCF + theories Domain_ex Fixrec_ex New_Domain document_files "root.tex" session "HOLCF-Library" in "HOLCF/Library" = HOLCF + theories HOLCF_Library HOL_Cpo session "HOLCF-IMP" in "HOLCF/IMP" = HOLCF + description " IMP -- A WHILE-language and its Semantics. This is the HOLCF-based denotational semantics of a simple WHILE-language. 
" sessions "HOL-IMP" theories HoareEx document_files "isaverbatimwrite.sty" "root.tex" "root.bib" session "HOLCF-ex" in "HOLCF/ex" = "HOLCF-Library" + description " Miscellaneous examples for HOLCF. " theories Dnat Dagstuhl Focus_ex Fix2 Hoare Concurrency_Monad Loop Powerdomain_ex Domain_Proofs Letrec Pattern_Match session "HOLCF-FOCUS" in "HOLCF/FOCUS" = "HOLCF-Library" + description \ FOCUS: a theory of stream-processing functions Isabelle/HOLCF. For introductions to FOCUS, see "The Design of Distributed Systems - An Introduction to FOCUS" http://www4.in.tum.de/publ/html.php?e=2 "Specification and Refinement of a Buffer of Length One" http://www4.in.tum.de/publ/html.php?e=15 "Specification and Development of Interactive Systems: Focus on Streams, Interfaces, and Refinement" http://www4.in.tum.de/publ/html.php?e=321 \ theories Fstreams FOCUS Buffer_adm session IOA (timing) in "HOLCF/IOA" = HOLCF + description " Author: Olaf Müller Copyright 1997 TU München A formalization of I/O automata in HOLCF. The distribution contains simulation relations, temporal logic, and an abstraction theory. Everything is based upon a domain-theoretic model of finite and infinite sequences. " theories Abstraction session "IOA-ABP" in "HOLCF/IOA/ABP" = IOA + description " Author: Olaf Müller The Alternating Bit Protocol performed in I/O-Automata: combining IOA with Model Checking. Theory Correctness contains the proof of the abstraction from unbounded channels to finite ones. File Check.ML contains a simple ModelChecker prototype checking Spec against the finite version of the ABP-protocol. " theories Correctness Spec session "IOA-NTP" in "HOLCF/IOA/NTP" = IOA + description " Author: Tobias Nipkow & Konrad Slind A network transmission protocol, performed in the I/O automata formalization by Olaf Müller. " theories Overview Correctness session "IOA-Storage" in "HOLCF/IOA/Storage" = IOA + description " Author: Olaf Müller Memory storage case study. " theories Correctness session "IOA-ex" in "HOLCF/IOA/ex" = IOA + description " Author: Olaf Müller " theories TrivEx TrivEx2 diff --git a/src/HOL/Relation.thy b/src/HOL/Relation.thy --- a/src/HOL/Relation.thy +++ b/src/HOL/Relation.thy @@ -1,1253 +1,1269 @@ (* Title: HOL/Relation.thy Author: Lawrence C Paulson, Cambridge University Computer Laboratory Author: Stefan Berghofer, TU Muenchen *) section \Relations -- as sets of pairs, and binary predicates\ theory Relation imports Finite_Set begin text \A preliminary: classical rules for reasoning on predicates\ declare predicate1I [Pure.intro!, intro!] declare predicate1D [Pure.dest, dest] declare predicate2I [Pure.intro!, intro!] declare predicate2D [Pure.dest, dest] declare bot1E [elim!] declare bot2E [elim!] declare top1I [intro!] declare top2I [intro!] declare inf1I [intro!] declare inf2I [intro!] declare inf1E [elim!] declare inf2E [elim!] declare sup1I1 [intro?] declare sup2I1 [intro?] declare sup1I2 [intro?] declare sup2I2 [intro?] declare sup1E [elim!] declare sup2E [elim!] declare sup1CI [intro!] declare sup2CI [intro!] declare Inf1_I [intro!] declare INF1_I [intro!] declare Inf2_I [intro!] declare INF2_I [intro!] declare Inf1_D [elim] declare INF1_D [elim] declare Inf2_D [elim] declare INF2_D [elim] declare Inf1_E [elim] declare INF1_E [elim] declare Inf2_E [elim] declare INF2_E [elim] declare Sup1_I [intro] declare SUP1_I [intro] declare Sup2_I [intro] declare SUP2_I [intro] declare Sup1_E [elim!] declare SUP1_E [elim!] declare Sup2_E [elim!] declare SUP2_E [elim!] 
subsection \Fundamental\ subsubsection \Relations as sets of pairs\ type_synonym 'a rel = "('a \ 'a) set" lemma subrelI: "(\x y. (x, y) \ r \ (x, y) \ s) \ r \ s" \ \Version of @{thm [source] subsetI} for binary relations\ by auto lemma lfp_induct2: "(a, b) \ lfp f \ mono f \ (\a b. (a, b) \ f (lfp f \ {(x, y). P x y}) \ P a b) \ P a b" \ \Version of @{thm [source] lfp_induct} for binary relations\ using lfp_induct_set [of "(a, b)" f "case_prod P"] by auto subsubsection \Conversions between set and predicate relations\ lemma pred_equals_eq [pred_set_conv]: "(\x. x \ R) = (\x. x \ S) \ R = S" by (simp add: set_eq_iff fun_eq_iff) lemma pred_equals_eq2 [pred_set_conv]: "(\x y. (x, y) \ R) = (\x y. (x, y) \ S) \ R = S" by (simp add: set_eq_iff fun_eq_iff) lemma pred_subset_eq [pred_set_conv]: "(\x. x \ R) \ (\x. x \ S) \ R \ S" by (simp add: subset_iff le_fun_def) lemma pred_subset_eq2 [pred_set_conv]: "(\x y. (x, y) \ R) \ (\x y. (x, y) \ S) \ R \ S" by (simp add: subset_iff le_fun_def) lemma bot_empty_eq [pred_set_conv]: "\ = (\x. x \ {})" by (auto simp add: fun_eq_iff) lemma bot_empty_eq2 [pred_set_conv]: "\ = (\x y. (x, y) \ {})" by (auto simp add: fun_eq_iff) lemma top_empty_eq [pred_set_conv]: "\ = (\x. x \ UNIV)" by (auto simp add: fun_eq_iff) lemma top_empty_eq2 [pred_set_conv]: "\ = (\x y. (x, y) \ UNIV)" by (auto simp add: fun_eq_iff) lemma inf_Int_eq [pred_set_conv]: "(\x. x \ R) \ (\x. x \ S) = (\x. x \ R \ S)" by (simp add: inf_fun_def) lemma inf_Int_eq2 [pred_set_conv]: "(\x y. (x, y) \ R) \ (\x y. (x, y) \ S) = (\x y. (x, y) \ R \ S)" by (simp add: inf_fun_def) lemma sup_Un_eq [pred_set_conv]: "(\x. x \ R) \ (\x. x \ S) = (\x. x \ R \ S)" by (simp add: sup_fun_def) lemma sup_Un_eq2 [pred_set_conv]: "(\x y. (x, y) \ R) \ (\x y. (x, y) \ S) = (\x y. (x, y) \ R \ S)" by (simp add: sup_fun_def) lemma INF_INT_eq [pred_set_conv]: "(\i\S. (\x. x \ r i)) = (\x. x \ (\i\S. r i))" by (simp add: fun_eq_iff) lemma INF_INT_eq2 [pred_set_conv]: "(\i\S. (\x y. (x, y) \ r i)) = (\x y. (x, y) \ (\i\S. r i))" by (simp add: fun_eq_iff) lemma SUP_UN_eq [pred_set_conv]: "(\i\S. (\x. x \ r i)) = (\x. x \ (\i\S. r i))" by (simp add: fun_eq_iff) lemma SUP_UN_eq2 [pred_set_conv]: "(\i\S. (\x y. (x, y) \ r i)) = (\x y. (x, y) \ (\i\S. r i))" by (simp add: fun_eq_iff) lemma Inf_INT_eq [pred_set_conv]: "\S = (\x. x \ (\(Collect ` S)))" by (simp add: fun_eq_iff) lemma INF_Int_eq [pred_set_conv]: "(\i\S. (\x. x \ i)) = (\x. x \ \S)" by (simp add: fun_eq_iff) lemma Inf_INT_eq2 [pred_set_conv]: "\S = (\x y. (x, y) \ (\(Collect ` case_prod ` S)))" by (simp add: fun_eq_iff) lemma INF_Int_eq2 [pred_set_conv]: "(\i\S. (\x y. (x, y) \ i)) = (\x y. (x, y) \ \S)" by (simp add: fun_eq_iff) lemma Sup_SUP_eq [pred_set_conv]: "\S = (\x. x \ \(Collect ` S))" by (simp add: fun_eq_iff) lemma SUP_Sup_eq [pred_set_conv]: "(\i\S. (\x. x \ i)) = (\x. x \ \S)" by (simp add: fun_eq_iff) lemma Sup_SUP_eq2 [pred_set_conv]: "\S = (\x y. (x, y) \ (\(Collect ` case_prod ` S)))" by (simp add: fun_eq_iff) lemma SUP_Sup_eq2 [pred_set_conv]: "(\i\S. (\x y. (x, y) \ i)) = (\x y. (x, y) \ \S)" by (simp add: fun_eq_iff) subsection \Properties of relations\ subsubsection \Reflexivity\ definition refl_on :: "'a set \ 'a rel \ bool" where "refl_on A r \ r \ A \ A \ (\x\A. (x, x) \ r)" abbreviation refl :: "'a rel \ bool" \ \reflexivity over a type\ where "refl \ refl_on UNIV" definition reflp :: "('a \ 'a \ bool) \ bool" where "reflp r \ (\x. r x x)" lemma reflp_refl_eq [pred_set_conv]: "reflp (\x y. 
(x, y) \ r) \ refl r" by (simp add: refl_on_def reflp_def) lemma refl_onI [intro?]: "r \ A \ A \ (\x. x \ A \ (x, x) \ r) \ refl_on A r" unfolding refl_on_def by (iprover intro!: ballI) lemma refl_onD: "refl_on A r \ a \ A \ (a, a) \ r" unfolding refl_on_def by blast lemma refl_onD1: "refl_on A r \ (x, y) \ r \ x \ A" unfolding refl_on_def by blast lemma refl_onD2: "refl_on A r \ (x, y) \ r \ y \ A" unfolding refl_on_def by blast lemma reflpI [intro?]: "(\x. r x x) \ reflp r" by (auto intro: refl_onI simp add: reflp_def) lemma reflpE: assumes "reflp r" obtains "r x x" using assms by (auto dest: refl_onD simp add: reflp_def) lemma reflpD [dest?]: assumes "reflp r" shows "r x x" using assms by (auto elim: reflpE) lemma refl_on_Int: "refl_on A r \ refl_on B s \ refl_on (A \ B) (r \ s)" unfolding refl_on_def by blast lemma reflp_inf: "reflp r \ reflp s \ reflp (r \ s)" by (auto intro: reflpI elim: reflpE) lemma refl_on_Un: "refl_on A r \ refl_on B s \ refl_on (A \ B) (r \ s)" unfolding refl_on_def by blast lemma reflp_sup: "reflp r \ reflp s \ reflp (r \ s)" by (auto intro: reflpI elim: reflpE) lemma refl_on_INTER: "\x\S. refl_on (A x) (r x) \ refl_on (\(A ` S)) (\(r ` S))" unfolding refl_on_def by fast lemma refl_on_UNION: "\x\S. refl_on (A x) (r x) \ refl_on (\(A ` S)) (\(r ` S))" unfolding refl_on_def by blast lemma refl_on_empty [simp]: "refl_on {} {}" by (simp add: refl_on_def) lemma refl_on_singleton [simp]: "refl_on {x} {(x, x)}" by (blast intro: refl_onI) lemma refl_on_def' [nitpick_unfold, code]: "refl_on A r \ (\(x, y) \ r. x \ A \ y \ A) \ (\x \ A. (x, x) \ r)" by (auto intro: refl_onI dest: refl_onD refl_onD1 refl_onD2) lemma reflp_equality [simp]: "reflp (=)" by (simp add: reflp_def) lemma reflp_mono: "reflp R \ (\x y. R x y \ Q x y) \ reflp Q" by (auto intro: reflpI dest: reflpD) subsubsection \Irreflexivity\ definition irrefl :: "'a rel \ bool" where "irrefl r \ (\a. (a, a) \ r)" definition irreflp :: "('a \ 'a \ bool) \ bool" where "irreflp R \ (\a. \ R a a)" lemma irreflp_irrefl_eq [pred_set_conv]: "irreflp (\a b. (a, b) \ R) \ irrefl R" by (simp add: irrefl_def irreflp_def) lemma irreflI [intro?]: "(\a. (a, a) \ R) \ irrefl R" by (simp add: irrefl_def) lemma irreflpI [intro?]: "(\a. \ R a a) \ irreflp R" by (fact irreflI [to_pred]) lemma irrefl_distinct [code]: "irrefl r \ (\(a, b) \ r. a \ b)" by (auto simp add: irrefl_def) +lemma (in preorder) irreflp_less[simp]: "irreflp (<)" + by (simp add: irreflpI) + +lemma (in preorder) irreflp_greater[simp]: "irreflp (>)" + by (simp add: irreflpI) subsubsection \Asymmetry\ inductive asym :: "'a rel \ bool" where asymI: "(\a b. (a, b) \ R \ (b, a) \ R) \ asym R" inductive asymp :: "('a \ 'a \ bool) \ bool" where asympI: "(\a b. R a b \ \ R b a) \ asymp R" lemma asymp_asym_eq [pred_set_conv]: "asymp (\a b. (a, b) \ R) \ asym R" by (auto intro!: asymI asympI elim: asym.cases asymp.cases simp add: irreflp_irrefl_eq) lemma asymD: "\asym R; (x,y) \ R\ \ (y,x) \ R" by (simp add: asym.simps) lemma asym_iff: "asym R \ (\x y. (x,y) \ R \ (y,x) \ R)" by (blast intro: asymI dest: asymD) +context preorder begin + +lemma asymp_less[simp]: "asymp (<)" + by (auto intro: asympI dual_order.asym) + +lemma asymp_greater[simp]: "asymp (>)" + by (auto intro: asympI dual_order.asym) + +end + + subsubsection \Symmetry\ definition sym :: "'a rel \ bool" where "sym r \ (\x y. (x, y) \ r \ (y, x) \ r)" definition symp :: "('a \ 'a \ bool) \ bool" where "symp r \ (\x y. r x y \ r y x)" lemma symp_sym_eq [pred_set_conv]: "symp (\x y. 
(x, y) \ r) \ sym r" by (simp add: sym_def symp_def) lemma symI [intro?]: "(\a b. (a, b) \ r \ (b, a) \ r) \ sym r" by (unfold sym_def) iprover lemma sympI [intro?]: "(\a b. r a b \ r b a) \ symp r" by (fact symI [to_pred]) lemma symE: assumes "sym r" and "(b, a) \ r" obtains "(a, b) \ r" using assms by (simp add: sym_def) lemma sympE: assumes "symp r" and "r b a" obtains "r a b" using assms by (rule symE [to_pred]) lemma symD [dest?]: assumes "sym r" and "(b, a) \ r" shows "(a, b) \ r" using assms by (rule symE) lemma sympD [dest?]: assumes "symp r" and "r b a" shows "r a b" using assms by (rule symD [to_pred]) lemma sym_Int: "sym r \ sym s \ sym (r \ s)" by (fast intro: symI elim: symE) lemma symp_inf: "symp r \ symp s \ symp (r \ s)" by (fact sym_Int [to_pred]) lemma sym_Un: "sym r \ sym s \ sym (r \ s)" by (fast intro: symI elim: symE) lemma symp_sup: "symp r \ symp s \ symp (r \ s)" by (fact sym_Un [to_pred]) lemma sym_INTER: "\x\S. sym (r x) \ sym (\(r ` S))" by (fast intro: symI elim: symE) lemma symp_INF: "\x\S. symp (r x) \ symp (\(r ` S))" by (fact sym_INTER [to_pred]) lemma sym_UNION: "\x\S. sym (r x) \ sym (\(r ` S))" by (fast intro: symI elim: symE) lemma symp_SUP: "\x\S. symp (r x) \ symp (\(r ` S))" by (fact sym_UNION [to_pred]) subsubsection \Antisymmetry\ definition antisym :: "'a rel \ bool" where "antisym r \ (\x y. (x, y) \ r \ (y, x) \ r \ x = y)" definition antisymp :: "('a \ 'a \ bool) \ bool" where "antisymp r \ (\x y. r x y \ r y x \ x = y)" lemma antisymp_antisym_eq [pred_set_conv]: "antisymp (\x y. (x, y) \ r) \ antisym r" by (simp add: antisym_def antisymp_def) lemma antisymI [intro?]: "(\x y. (x, y) \ r \ (y, x) \ r \ x = y) \ antisym r" unfolding antisym_def by iprover lemma antisympI [intro?]: "(\x y. r x y \ r y x \ x = y) \ antisymp r" by (fact antisymI [to_pred]) lemma antisymD [dest?]: "antisym r \ (a, b) \ r \ (b, a) \ r \ a = b" unfolding antisym_def by iprover lemma antisympD [dest?]: "antisymp r \ r a b \ r b a \ a = b" by (fact antisymD [to_pred]) lemma antisym_subset: "r \ s \ antisym s \ antisym r" unfolding antisym_def by blast lemma antisymp_less_eq: "r \ s \ antisymp s \ antisymp r" by (fact antisym_subset [to_pred]) lemma antisym_empty [simp]: "antisym {}" unfolding antisym_def by blast lemma antisym_bot [simp]: "antisymp \" by (fact antisym_empty [to_pred]) lemma antisymp_equality [simp]: "antisymp HOL.eq" by (auto intro: antisympI) lemma antisym_singleton [simp]: "antisym {x}" by (blast intro: antisymI) subsubsection \Transitivity\ definition trans :: "'a rel \ bool" where "trans r \ (\x y z. (x, y) \ r \ (y, z) \ r \ (x, z) \ r)" definition transp :: "('a \ 'a \ bool) \ bool" where "transp r \ (\x y z. r x y \ r y z \ r x z)" lemma transp_trans_eq [pred_set_conv]: "transp (\x y. (x, y) \ r) \ trans r" by (simp add: trans_def transp_def) lemma transI [intro?]: "(\x y z. (x, y) \ r \ (y, z) \ r \ (x, z) \ r) \ trans r" by (unfold trans_def) iprover lemma transpI [intro?]: "(\x y z. 
r x y \ r y z \ r x z) \ transp r" by (fact transI [to_pred]) lemma transE: assumes "trans r" and "(x, y) \ r" and "(y, z) \ r" obtains "(x, z) \ r" using assms by (unfold trans_def) iprover lemma transpE: assumes "transp r" and "r x y" and "r y z" obtains "r x z" using assms by (rule transE [to_pred]) lemma transD [dest?]: assumes "trans r" and "(x, y) \ r" and "(y, z) \ r" shows "(x, z) \ r" using assms by (rule transE) lemma transpD [dest?]: assumes "transp r" and "r x y" and "r y z" shows "r x z" using assms by (rule transD [to_pred]) lemma trans_Int: "trans r \ trans s \ trans (r \ s)" by (fast intro: transI elim: transE) lemma transp_inf: "transp r \ transp s \ transp (r \ s)" by (fact trans_Int [to_pred]) lemma trans_INTER: "\x\S. trans (r x) \ trans (\(r ` S))" by (fast intro: transI elim: transD) lemma transp_INF: "\x\S. transp (r x) \ transp (\(r ` S))" by (fact trans_INTER [to_pred]) lemma trans_join [code]: "trans r \ (\(x, y1) \ r. \(y2, z) \ r. y1 = y2 \ (x, z) \ r)" by (auto simp add: trans_def) lemma transp_trans: "transp r \ trans {(x, y). r x y}" by (simp add: trans_def transp_def) lemma transp_equality [simp]: "transp (=)" by (auto intro: transpI) lemma trans_empty [simp]: "trans {}" by (blast intro: transI) lemma transp_empty [simp]: "transp (\x y. False)" using trans_empty[to_pred] by (simp add: bot_fun_def) lemma trans_singleton [simp]: "trans {(a, a)}" by (blast intro: transI) lemma transp_singleton [simp]: "transp (\x y. x = a \ y = a)" by (simp add: transp_def) context preorder begin lemma transp_le[simp]: "transp (\)" by(auto simp add: transp_def intro: order_trans) lemma transp_less[simp]: "transp (<)" by(auto simp add: transp_def intro: less_trans) lemma transp_ge[simp]: "transp (\)" by(auto simp add: transp_def intro: order_trans) lemma transp_gr[simp]: "transp (>)" by(auto simp add: transp_def intro: less_trans) end subsubsection \Totality\ definition total_on :: "'a set \ 'a rel \ bool" where "total_on A r \ (\x\A. \y\A. x \ y \ (x, y) \ r \ (y, x) \ r)" lemma total_onI [intro?]: "(\x y. \x \ A; y \ A; x \ y\ \ (x, y) \ r \ (y, x) \ r) \ total_on A r" unfolding total_on_def by blast abbreviation "total \ total_on UNIV" lemma total_on_empty [simp]: "total_on {} r" by (simp add: total_on_def) lemma total_on_singleton [simp]: "total_on {x} {(x, x)}" unfolding total_on_def by blast subsubsection \Single valued relations\ definition single_valued :: "('a \ 'b) set \ bool" where "single_valued r \ (\x y. (x, y) \ r \ (\z. (x, z) \ r \ y = z))" definition single_valuedp :: "('a \ 'b \ bool) \ bool" where "single_valuedp r \ (\x y. r x y \ (\z. r x z \ y = z))" lemma single_valuedp_single_valued_eq [pred_set_conv]: "single_valuedp (\x y. (x, y) \ r) \ single_valued r" by (simp add: single_valued_def single_valuedp_def) lemma single_valuedp_iff_Uniq: "single_valuedp r \ (\x. \\<^sub>\\<^sub>1y. r x y)" unfolding Uniq_def single_valuedp_def by auto lemma single_valuedI: "(\x y. (x, y) \ r \ (\z. (x, z) \ r \ y = z)) \ single_valued r" unfolding single_valued_def by blast lemma single_valuedpI: "(\x y. r x y \ (\z. 
r x z \ y = z)) \ single_valuedp r" by (fact single_valuedI [to_pred]) lemma single_valuedD: "single_valued r \ (x, y) \ r \ (x, z) \ r \ y = z" by (simp add: single_valued_def) lemma single_valuedpD: "single_valuedp r \ r x y \ r x z \ y = z" by (fact single_valuedD [to_pred]) lemma single_valued_empty [simp]: "single_valued {}" by (simp add: single_valued_def) lemma single_valuedp_bot [simp]: "single_valuedp \" by (fact single_valued_empty [to_pred]) lemma single_valued_subset: "r \ s \ single_valued s \ single_valued r" unfolding single_valued_def by blast lemma single_valuedp_less_eq: "r \ s \ single_valuedp s \ single_valuedp r" by (fact single_valued_subset [to_pred]) subsection \Relation operations\ subsubsection \The identity relation\ definition Id :: "'a rel" where "Id = {p. \x. p = (x, x)}" lemma IdI [intro]: "(a, a) \ Id" by (simp add: Id_def) lemma IdE [elim!]: "p \ Id \ (\x. p = (x, x) \ P) \ P" unfolding Id_def by (iprover elim: CollectE) lemma pair_in_Id_conv [iff]: "(a, b) \ Id \ a = b" unfolding Id_def by blast lemma refl_Id: "refl Id" by (simp add: refl_on_def) lemma antisym_Id: "antisym Id" \ \A strange result, since \Id\ is also symmetric.\ by (simp add: antisym_def) lemma sym_Id: "sym Id" by (simp add: sym_def) lemma trans_Id: "trans Id" by (simp add: trans_def) lemma single_valued_Id [simp]: "single_valued Id" by (unfold single_valued_def) blast lemma irrefl_diff_Id [simp]: "irrefl (r - Id)" by (simp add: irrefl_def) lemma trans_diff_Id: "trans r \ antisym r \ trans (r - Id)" unfolding antisym_def trans_def by blast lemma total_on_diff_Id [simp]: "total_on A (r - Id) = total_on A r" by (simp add: total_on_def) lemma Id_fstsnd_eq: "Id = {x. fst x = snd x}" by force subsubsection \Diagonal: identity over a set\ definition Id_on :: "'a set \ 'a rel" where "Id_on A = (\x\A. {(x, x)})" lemma Id_on_empty [simp]: "Id_on {} = {}" by (simp add: Id_on_def) lemma Id_on_eqI: "a = b \ a \ A \ (a, b) \ Id_on A" by (simp add: Id_on_def) lemma Id_onI [intro!]: "a \ A \ (a, a) \ Id_on A" by (rule Id_on_eqI) (rule refl) lemma Id_onE [elim!]: "c \ Id_on A \ (\x. x \ A \ c = (x, x) \ P) \ P" \ \The general elimination rule.\ unfolding Id_on_def by (iprover elim!: UN_E singletonE) lemma Id_on_iff: "(x, y) \ Id_on A \ x = y \ x \ A" by blast lemma Id_on_def' [nitpick_unfold]: "Id_on {x. A x} = Collect (\(x, y). x = y \ A x)" by auto lemma Id_on_subset_Times: "Id_on A \ A \ A" by blast lemma refl_on_Id_on: "refl_on A (Id_on A)" by (rule refl_onI [OF Id_on_subset_Times Id_onI]) lemma antisym_Id_on [simp]: "antisym (Id_on A)" unfolding antisym_def by blast lemma sym_Id_on [simp]: "sym (Id_on A)" by (rule symI) clarify lemma trans_Id_on [simp]: "trans (Id_on A)" by (fast intro: transI elim: transD) lemma single_valued_Id_on [simp]: "single_valued (Id_on A)" unfolding single_valued_def by blast subsubsection \Composition\ inductive_set relcomp :: "('a \ 'b) set \ ('b \ 'c) set \ ('a \ 'c) set" (infixr "O" 75) for r :: "('a \ 'b) set" and s :: "('b \ 'c) set" where relcompI [intro]: "(a, b) \ r \ (b, c) \ s \ (a, c) \ r O s" notation relcompp (infixr "OO" 75) lemmas relcomppI = relcompp.intros text \ For historic reasons, the elimination rules are not wholly corresponding. Feel free to consolidate this. \ inductive_cases relcompEpair: "(a, c) \ r O s" inductive_cases relcomppE [elim!]: "(r OO s) a c" lemma relcompE [elim!]: "xz \ r O s \ (\x y z. 
xz = (x, z) \ (x, y) \ r \ (y, z) \ s \ P) \ P" apply (cases xz) apply simp apply (erule relcompEpair) apply iprover done lemma R_O_Id [simp]: "R O Id = R" by fast lemma Id_O_R [simp]: "Id O R = R" by fast lemma relcomp_empty1 [simp]: "{} O R = {}" by blast lemma relcompp_bot1 [simp]: "\ OO R = \" by (fact relcomp_empty1 [to_pred]) lemma relcomp_empty2 [simp]: "R O {} = {}" by blast lemma relcompp_bot2 [simp]: "R OO \ = \" by (fact relcomp_empty2 [to_pred]) lemma O_assoc: "(R O S) O T = R O (S O T)" by blast lemma relcompp_assoc: "(r OO s) OO t = r OO (s OO t)" by (fact O_assoc [to_pred]) lemma trans_O_subset: "trans r \ r O r \ r" by (unfold trans_def) blast lemma transp_relcompp_less_eq: "transp r \ r OO r \ r " by (fact trans_O_subset [to_pred]) lemma relcomp_mono: "r' \ r \ s' \ s \ r' O s' \ r O s" by blast lemma relcompp_mono: "r' \ r \ s' \ s \ r' OO s' \ r OO s " by (fact relcomp_mono [to_pred]) lemma relcomp_subset_Sigma: "r \ A \ B \ s \ B \ C \ r O s \ A \ C" by blast lemma relcomp_distrib [simp]: "R O (S \ T) = (R O S) \ (R O T)" by auto lemma relcompp_distrib [simp]: "R OO (S \ T) = R OO S \ R OO T" by (fact relcomp_distrib [to_pred]) lemma relcomp_distrib2 [simp]: "(S \ T) O R = (S O R) \ (T O R)" by auto lemma relcompp_distrib2 [simp]: "(S \ T) OO R = S OO R \ T OO R" by (fact relcomp_distrib2 [to_pred]) lemma relcomp_UNION_distrib: "s O \(r ` I) = (\i\I. s O r i) " by auto lemma relcompp_SUP_distrib: "s OO \(r ` I) = (\i\I. s OO r i)" by (fact relcomp_UNION_distrib [to_pred]) lemma relcomp_UNION_distrib2: "\(r ` I) O s = (\i\I. r i O s) " by auto lemma relcompp_SUP_distrib2: "\(r ` I) OO s = (\i\I. r i OO s)" by (fact relcomp_UNION_distrib2 [to_pred]) lemma single_valued_relcomp: "single_valued r \ single_valued s \ single_valued (r O s)" unfolding single_valued_def by blast lemma relcomp_unfold: "r O s = {(x, z). \y. (x, y) \ r \ (y, z) \ s}" by (auto simp add: set_eq_iff) lemma relcompp_apply: "(R OO S) a c \ (\b. R a b \ S b c)" unfolding relcomp_unfold [to_pred] .. lemma eq_OO: "(=) OO R = R" by blast lemma OO_eq: "R OO (=) = R" by blast subsubsection \Converse\ inductive_set converse :: "('a \ 'b) set \ ('b \ 'a) set" ("(_\)" [1000] 999) for r :: "('a \ 'b) set" where "(a, b) \ r \ (b, a) \ r\" notation conversep ("(_\\)" [1000] 1000) notation (ASCII) converse ("(_^-1)" [1000] 999) and conversep ("(_^--1)" [1000] 1000) lemma converseI [sym]: "(a, b) \ r \ (b, a) \ r\" by (fact converse.intros) lemma conversepI (* CANDIDATE [sym] *): "r a b \ r\\ b a" by (fact conversep.intros) lemma converseD [sym]: "(a, b) \ r\ \ (b, a) \ r" by (erule converse.cases) iprover lemma conversepD (* CANDIDATE [sym] *): "r\\ b a \ r a b" by (fact converseD [to_pred]) lemma converseE [elim!]: "yx \ r\ \ (\x y. yx = (y, x) \ (x, y) \ r \ P) \ P" \ \More general than \converseD\, as it ``splits'' the member of the relation.\ apply (cases yx) apply simp apply (erule converse.cases) apply iprover done lemmas conversepE [elim!] 
= conversep.cases lemma converse_iff [iff]: "(a, b) \ r\ \ (b, a) \ r" by (auto intro: converseI) lemma conversep_iff [iff]: "r\\ a b = r b a" by (fact converse_iff [to_pred]) lemma converse_converse [simp]: "(r\)\ = r" by (simp add: set_eq_iff) lemma conversep_conversep [simp]: "(r\\)\\ = r" by (fact converse_converse [to_pred]) lemma converse_empty[simp]: "{}\ = {}" by auto lemma converse_UNIV[simp]: "UNIV\ = UNIV" by auto lemma converse_relcomp: "(r O s)\ = s\ O r\" by blast lemma converse_relcompp: "(r OO s)\\ = s\\ OO r\\" by (iprover intro: order_antisym conversepI relcomppI elim: relcomppE dest: conversepD) lemma converse_Int: "(r \ s)\ = r\ \ s\" by blast lemma converse_meet: "(r \ s)\\ = r\\ \ s\\" by (simp add: inf_fun_def) (iprover intro: conversepI ext dest: conversepD) lemma converse_Un: "(r \ s)\ = r\ \ s\" by blast lemma converse_join: "(r \ s)\\ = r\\ \ s\\" by (simp add: sup_fun_def) (iprover intro: conversepI ext dest: conversepD) lemma converse_INTER: "(\(r ` S))\ = (\x\S. (r x)\)" by fast lemma converse_UNION: "(\(r ` S))\ = (\x\S. (r x)\)" by blast lemma converse_mono[simp]: "r\ \ s \ \ r \ s" by auto lemma conversep_mono[simp]: "r\\ \ s \\ \ r \ s" by (fact converse_mono[to_pred]) lemma converse_inject[simp]: "r\ = s \ \ r = s" by auto lemma conversep_inject[simp]: "r\\ = s \\ \ r = s" by (fact converse_inject[to_pred]) lemma converse_subset_swap: "r \ s \ \ r \ \ s" by auto lemma conversep_le_swap: "r \ s \\ \ r \\ \ s" by (fact converse_subset_swap[to_pred]) lemma converse_Id [simp]: "Id\ = Id" by blast lemma converse_Id_on [simp]: "(Id_on A)\ = Id_on A" by blast lemma refl_on_converse [simp]: "refl_on A (converse r) = refl_on A r" by (auto simp: refl_on_def) lemma sym_converse [simp]: "sym (converse r) = sym r" unfolding sym_def by blast lemma antisym_converse [simp]: "antisym (converse r) = antisym r" unfolding antisym_def by blast lemma trans_converse [simp]: "trans (converse r) = trans r" unfolding trans_def by blast lemma sym_conv_converse_eq: "sym r \ r\ = r" unfolding sym_def by fast lemma sym_Un_converse: "sym (r \ r\)" unfolding sym_def by blast lemma sym_Int_converse: "sym (r \ r\)" unfolding sym_def by blast lemma total_on_converse [simp]: "total_on A (r\) = total_on A r" by (auto simp: total_on_def) lemma finite_converse [iff]: "finite (r\) = finite r" unfolding converse_def conversep_iff using [[simproc add: finite_Collect]] by (auto elim: finite_imageD simp: inj_on_def) lemma card_inverse[simp]: "card (R\) = card R" proof - have *: "\R. prod.swap ` R = R\" by auto { assume "\finite R" hence ?thesis by auto } moreover { assume "finite R" with card_image_le[of R prod.swap] card_image_le[of "R\" prod.swap] have ?thesis by (auto simp: *) } ultimately show ?thesis by blast qed lemma conversep_noteq [simp]: "(\)\\ = (\)" by (auto simp add: fun_eq_iff) lemma conversep_eq [simp]: "(=)\\ = (=)" by (auto simp add: fun_eq_iff) lemma converse_unfold [code]: "r\ = {(y, x). 
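text \<open>A concrete instance of converse, written with the ASCII notation \<open>^-1\<close> declared above (the proof should be immediate by \<open>auto\<close> from \<open>converse_iff\<close>):\<close>

lemma "{(1::nat, 2::nat), (1, 3)}^-1 = {(2, 1), (3, 1)}"
  by auto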
(x, y) \ r}" by (simp add: set_eq_iff) subsubsection \Domain, range and field\ inductive_set Domain :: "('a \ 'b) set \ 'a set" for r :: "('a \ 'b) set" where DomainI [intro]: "(a, b) \ r \ a \ Domain r" lemmas DomainPI = Domainp.DomainI inductive_cases DomainE [elim!]: "a \ Domain r" inductive_cases DomainpE [elim!]: "Domainp r a" inductive_set Range :: "('a \ 'b) set \ 'b set" for r :: "('a \ 'b) set" where RangeI [intro]: "(a, b) \ r \ b \ Range r" lemmas RangePI = Rangep.RangeI inductive_cases RangeE [elim!]: "b \ Range r" inductive_cases RangepE [elim!]: "Rangep r b" definition Field :: "'a rel \ 'a set" where "Field r = Domain r \ Range r" lemma FieldI1: "(i, j) \ R \ i \ Field R" unfolding Field_def by blast lemma FieldI2: "(i, j) \ R \ j \ Field R" unfolding Field_def by auto lemma Domain_fst [code]: "Domain r = fst ` r" by force lemma Range_snd [code]: "Range r = snd ` r" by force lemma fst_eq_Domain: "fst ` R = Domain R" by force lemma snd_eq_Range: "snd ` R = Range R" by force lemma range_fst [simp]: "range fst = UNIV" by (auto simp: fst_eq_Domain) lemma range_snd [simp]: "range snd = UNIV" by (auto simp: snd_eq_Range) lemma Domain_empty [simp]: "Domain {} = {}" by auto lemma Range_empty [simp]: "Range {} = {}" by auto lemma Field_empty [simp]: "Field {} = {}" by (simp add: Field_def) lemma Domain_empty_iff: "Domain r = {} \ r = {}" by auto lemma Range_empty_iff: "Range r = {} \ r = {}" by auto lemma Domain_insert [simp]: "Domain (insert (a, b) r) = insert a (Domain r)" by blast lemma Range_insert [simp]: "Range (insert (a, b) r) = insert b (Range r)" by blast lemma Field_insert [simp]: "Field (insert (a, b) r) = {a, b} \ Field r" by (auto simp add: Field_def) lemma Domain_iff: "a \ Domain r \ (\y. (a, y) \ r)" by blast lemma Range_iff: "a \ Range r \ (\y. (y, a) \ r)" by blast lemma Domain_Id [simp]: "Domain Id = UNIV" by blast lemma Range_Id [simp]: "Range Id = UNIV" by blast lemma Domain_Id_on [simp]: "Domain (Id_on A) = A" by blast lemma Range_Id_on [simp]: "Range (Id_on A) = A" by blast lemma Domain_Un_eq: "Domain (A \ B) = Domain A \ Domain B" by blast lemma Range_Un_eq: "Range (A \ B) = Range A \ Range B" by blast lemma Field_Un [simp]: "Field (r \ s) = Field r \ Field s" by (auto simp: Field_def) lemma Domain_Int_subset: "Domain (A \ B) \ Domain A \ Domain B" by blast lemma Range_Int_subset: "Range (A \ B) \ Range A \ Range B" by blast lemma Domain_Diff_subset: "Domain A - Domain B \ Domain (A - B)" by blast lemma Range_Diff_subset: "Range A - Range B \ Range (A - B)" by blast lemma Domain_Union: "Domain (\S) = (\A\S. Domain A)" by blast lemma Range_Union: "Range (\S) = (\A\S. Range A)" by blast lemma Field_Union [simp]: "Field (\R) = \(Field ` R)" by (auto simp: Field_def) lemma Domain_converse [simp]: "Domain (r\) = Range r" by auto lemma Range_converse [simp]: "Range (r\) = Domain r" by blast lemma Field_converse [simp]: "Field (r\) = Field r" by (auto simp: Field_def) lemma Domain_Collect_case_prod [simp]: "Domain {(x, y). P x y} = {x. \y. P x y}" by auto lemma Range_Collect_case_prod [simp]: "Range {(x, y). P x y} = {y. \x. 
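text \<open>On an explicit relation the three operations evaluate as expected; a sanity check that \<open>auto\<close> should settle after unfolding \<open>Field_def\<close>:\<close>

lemma shows "Domain {(1::nat, 10::nat), (2, 20)} = {1, 2}"
  and "Range {(1::nat, 10::nat), (2, 20)} = {10, 20}"
  and "Field {(1::nat, 10::nat), (2, 20)} = {1, 2, 10, 20}"
  by (auto simp: Field_def)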
P x y}" by auto lemma finite_Domain: "finite r \ finite (Domain r)" by (induct set: finite) auto lemma finite_Range: "finite r \ finite (Range r)" by (induct set: finite) auto lemma finite_Field: "finite r \ finite (Field r)" by (simp add: Field_def finite_Domain finite_Range) lemma Domain_mono: "r \ s \ Domain r \ Domain s" by blast lemma Range_mono: "r \ s \ Range r \ Range s" by blast lemma mono_Field: "r \ s \ Field r \ Field s" by (auto simp: Field_def Domain_def Range_def) lemma Domain_unfold: "Domain r = {x. \y. (x, y) \ r}" by blast lemma Field_square [simp]: "Field (x \ x) = x" unfolding Field_def by blast subsubsection \Image of a set under a relation\ definition Image :: "('a \ 'b) set \ 'a set \ 'b set" (infixr "``" 90) where "r `` s = {y. \x\s. (x, y) \ r}" lemma Image_iff: "b \ r``A \ (\x\A. (x, b) \ r)" by (simp add: Image_def) lemma Image_singleton: "r``{a} = {b. (a, b) \ r}" by (simp add: Image_def) lemma Image_singleton_iff [iff]: "b \ r``{a} \ (a, b) \ r" by (rule Image_iff [THEN trans]) simp lemma ImageI [intro]: "(a, b) \ r \ a \ A \ b \ r``A" unfolding Image_def by blast lemma ImageE [elim!]: "b \ r `` A \ (\x. (x, b) \ r \ x \ A \ P) \ P" unfolding Image_def by (iprover elim!: CollectE bexE) lemma rev_ImageI: "a \ A \ (a, b) \ r \ b \ r `` A" \ \This version's more effective when we already have the required \a\\ by blast lemma Image_empty1 [simp]: "{} `` X = {}" by auto lemma Image_empty2 [simp]: "R``{} = {}" by blast lemma Image_Id [simp]: "Id `` A = A" by blast lemma Image_Id_on [simp]: "Id_on A `` B = A \ B" by blast lemma Image_Int_subset: "R `` (A \ B) \ R `` A \ R `` B" by blast lemma Image_Int_eq: "single_valued (converse R) \ R `` (A \ B) = R `` A \ R `` B" by (auto simp: single_valued_def) lemma Image_Un: "R `` (A \ B) = R `` A \ R `` B" by blast lemma Un_Image: "(R \ S) `` A = R `` A \ S `` A" by blast lemma Image_subset: "r \ A \ B \ r``C \ B" by (iprover intro!: subsetI elim!: ImageE dest!: subsetD SigmaD2) lemma Image_eq_UN: "r``B = (\y\ B. r``{y})" \ \NOT suitable for rewriting\ by blast lemma Image_mono: "r' \ r \ A' \ A \ (r' `` A') \ (r `` A)" by blast lemma Image_UN: "r `` (\(B ` A)) = (\x\A. r `` (B x))" by blast lemma UN_Image: "(\i\I. X i) `` S = (\i\I. X i `` S)" by auto lemma Image_INT_subset: "(r `` (\(B ` A))) \ (\x\A. r `` (B x))" by blast text \Converse inclusion requires some assumptions\ lemma Image_INT_eq: "single_valued (r\) \ A \ {} \ r `` (\(B ` A)) = (\x\A. r `` B x)" apply (rule equalityI) apply (rule Image_INT_subset) apply (auto simp add: single_valued_def) apply blast done lemma Image_subset_eq: "r``A \ B \ A \ - ((r\) `` (- B))" by blast lemma Image_Collect_case_prod [simp]: "{(x, y). P x y} `` A = {y. \x\A. P x y}" by auto lemma Sigma_Image: "(SIGMA x:A. B x) `` X = (\x\X \ A. B x)" by auto lemma relcomp_Image: "(X O Y) `` Z = Y `` (X `` Z)" by auto lemma finite_Image[simp]: assumes "finite R" shows "finite (R `` A)" by(rule finite_subset[OF _ finite_Range[OF assms]]) auto subsubsection \Inverse image\ definition inv_image :: "'b rel \ ('a \ 'b) \ 'a rel" where "inv_image r f = {(x, y). (f x, f y) \ r}" definition inv_imagep :: "('b \ 'b \ bool) \ ('a \ 'b) \ 'a \ 'a \ bool" where "inv_imagep r f = (\x y. r (f x) (f y))" lemma [pred_set_conv]: "inv_imagep (\x y. (x, y) \ r) f = (\x y. 
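text \<open>For instance, the image of \<open>{1, 2}\<close> under an explicit relation collects exactly the successors of \<open>1\<close> and \<open>2\<close> (again a check that \<open>auto\<close> should close):\<close>

lemma "{(1::nat, 10::nat), (2, 20), (3, 30)} `` {1, 2} = {10, 20}"
  by auto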
(x, y) \ inv_image r f)" by (simp add: inv_image_def inv_imagep_def) lemma sym_inv_image: "sym r \ sym (inv_image r f)" unfolding sym_def inv_image_def by blast lemma trans_inv_image: "trans r \ trans (inv_image r f)" unfolding trans_def inv_image_def by (simp (no_asm)) blast lemma total_inv_image: "\inj f; total r\ \ total (inv_image r f)" unfolding inv_image_def total_on_def by (auto simp: inj_eq) lemma asym_inv_image: "asym R \ asym (inv_image R f)" by (simp add: inv_image_def asym_iff) lemma in_inv_image[simp]: "(x, y) \ inv_image r f \ (f x, f y) \ r" by (auto simp: inv_image_def) lemma converse_inv_image[simp]: "(inv_image R f)\ = inv_image (R\) f" unfolding inv_image_def converse_unfold by auto lemma in_inv_imagep [simp]: "inv_imagep r f x y = r (f x) (f y)" by (simp add: inv_imagep_def) subsubsection \Powerset\ definition Powp :: "('a \ bool) \ 'a set \ bool" where "Powp A = (\B. \x \ B. A x)" lemma Powp_Pow_eq [pred_set_conv]: "Powp (\x. x \ A) = (\x. x \ Pow A)" by (auto simp add: Powp_def fun_eq_iff) lemmas Powp_mono [mono] = Pow_mono [to_pred] subsubsection \Expressing relation operations via \<^const>\Finite_Set.fold\\ lemma Id_on_fold: assumes "finite A" shows "Id_on A = Finite_Set.fold (\x. Set.insert (Pair x x)) {} A" proof - interpret comp_fun_commute "\x. Set.insert (Pair x x)" by standard auto from assms show ?thesis unfolding Id_on_def by (induct A) simp_all qed lemma comp_fun_commute_Image_fold: "comp_fun_commute (\(x,y) A. if x \ S then Set.insert y A else A)" proof - interpret comp_fun_idem Set.insert by (fact comp_fun_idem_insert) show ?thesis by standard (auto simp: fun_eq_iff comp_fun_commute split: prod.split) qed lemma Image_fold: assumes "finite R" shows "R `` S = Finite_Set.fold (\(x,y) A. if x \ S then Set.insert y A else A) {} R" proof - interpret comp_fun_commute "(\(x,y) A. if x \ S then Set.insert y A else A)" by (rule comp_fun_commute_Image_fold) have *: "\x F. Set.insert x F `` S = (if fst x \ S then Set.insert (snd x) (F `` S) else (F `` S))" by (force intro: rev_ImageI) show ?thesis using assms by (induct R) (auto simp: *) qed lemma insert_relcomp_union_fold: assumes "finite S" shows "{x} O S \ X = Finite_Set.fold (\(w,z) A'. if snd x = w then Set.insert (fst x,z) A' else A') X S" proof - interpret comp_fun_commute "\(w,z) A'. if snd x = w then Set.insert (fst x,z) A' else A'" proof - interpret comp_fun_idem Set.insert by (fact comp_fun_idem_insert) show "comp_fun_commute (\(w,z) A'. if snd x = w then Set.insert (fst x,z) A' else A')" by standard (auto simp add: fun_eq_iff split: prod.split) qed have *: "{x} O S = {(x', z). x' = fst x \ (snd x, z) \ S}" by (auto simp: relcomp_unfold intro!: exI) show ?thesis unfolding * using \finite S\ by (induct S) (auto split: prod.split) qed lemma insert_relcomp_fold: assumes "finite S" shows "Set.insert x R O S = Finite_Set.fold (\(w,z) A'. if snd x = w then Set.insert (fst x,z) A' else A') (R O S) S" proof - have "Set.insert x R O S = ({x} O S) \ (R O S)" by auto then show ?thesis by (auto simp: insert_relcomp_union_fold [OF assms]) qed lemma comp_fun_commute_relcomp_fold: assumes "finite S" shows "comp_fun_commute (\(x,y) A. Finite_Set.fold (\(w,z) A'. if y = w then Set.insert (x,z) A' else A') A S)" proof - have *: "\a b A. Finite_Set.fold (\(w, z) A'. 
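text \<open>The fold equations above make these operations effectively computable on explicit finite relations; independently of the fold route, the intended result can be checked directly, e.g.:\<close>

lemma "Id_on {1::nat, 2} = {(1, 1), (2, 2)}"
  by auto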
if b = w then Set.insert (a, z) A' else A') A S = {(a,b)} O S \ A" by (auto simp: insert_relcomp_union_fold[OF assms] cong: if_cong) show ?thesis by standard (auto simp: *) qed lemma relcomp_fold: assumes "finite R" "finite S" shows "R O S = Finite_Set.fold (\(x,y) A. Finite_Set.fold (\(w,z) A'. if y = w then Set.insert (x,z) A' else A') A S) {} R" proof - interpret commute_relcomp_fold: comp_fun_commute "(\(x, y) A. Finite_Set.fold (\(w, z) A'. if y = w then insert (x, z) A' else A') A S)" by (fact comp_fun_commute_relcomp_fold[OF \finite S\]) from assms show ?thesis by (induct R) (auto simp: comp_fun_commute_relcomp_fold insert_relcomp_fold cong: if_cong) qed end diff --git a/src/HOL/Set_Interval.thy b/src/HOL/Set_Interval.thy --- a/src/HOL/Set_Interval.thy +++ b/src/HOL/Set_Interval.thy @@ -1,2564 +1,2564 @@ (* Title: HOL/Set_Interval.thy Author: Tobias Nipkow, Clemens Ballarin, Jeremy Avigad lessThan, greaterThan, atLeast, atMost and two-sided intervals Modern convention: Ixy stands for an interval where x and y describe the lower and upper bound and x,y : {c,o,i} where c = closed, o = open, i = infinite. Examples: Ico = {_ ..< _} and Ici = {_ ..} *) section \Set intervals\ theory Set_Interval imports Divides begin (* Belongs in Finite_Set but 2 is not available there *) lemma card_2_iff: "card S = 2 \ (\x y. S = {x,y} \ x \ y)" by (auto simp: card_Suc_eq numeral_eq_Suc) lemma card_2_iff': "card S = 2 \ (\x\S. \y\S. x \ y \ (\z\S. z = x \ z = y))" by (auto simp: card_Suc_eq numeral_eq_Suc) context ord begin definition lessThan :: "'a => 'a set" ("(1{..<_})") where "{.. 'a set" ("(1{.._})") where "{..u} == {x. x \ u}" definition greaterThan :: "'a => 'a set" ("(1{_<..})") where "{l<..} == {x. l 'a set" ("(1{_..})") where "{l..} == {x. l\x}" definition greaterThanLessThan :: "'a => 'a => 'a set" ("(1{_<..<_})") where "{l<.. 'a => 'a set" ("(1{_..<_})") where "{l.. 'a => 'a set" ("(1{_<.._})") where "{l<..u} == {l<..} Int {..u}" definition atLeastAtMost :: "'a => 'a => 'a set" ("(1{_.._})") where "{l..u} == {l..} Int {..u}" end text\A note of warning when using \<^term>\{.. on type \<^typ>\nat\: it is equivalent to \<^term>\{0::nat.. but some lemmas involving \<^term>\{m.. may not exist in \<^term>\{..-form as well.\ syntax (ASCII) "_UNION_le" :: "'a => 'a => 'b set => 'b set" ("(3UN _<=_./ _)" [0, 0, 10] 10) "_UNION_less" :: "'a => 'a => 'b set => 'b set" ("(3UN _<_./ _)" [0, 0, 10] 10) "_INTER_le" :: "'a => 'a => 'b set => 'b set" ("(3INT _<=_./ _)" [0, 0, 10] 10) "_INTER_less" :: "'a => 'a => 'b set => 'b set" ("(3INT _<_./ _)" [0, 0, 10] 10) syntax (latex output) "_UNION_le" :: "'a \ 'a => 'b set => 'b set" ("(3\(\unbreakable\_ \ _)/ _)" [0, 0, 10] 10) "_UNION_less" :: "'a \ 'a => 'b set => 'b set" ("(3\(\unbreakable\_ < _)/ _)" [0, 0, 10] 10) "_INTER_le" :: "'a \ 'a => 'b set => 'b set" ("(3\(\unbreakable\_ \ _)/ _)" [0, 0, 10] 10) "_INTER_less" :: "'a \ 'a => 'b set => 'b set" ("(3\(\unbreakable\_ < _)/ _)" [0, 0, 10] 10) syntax "_UNION_le" :: "'a => 'a => 'b set => 'b set" ("(3\_\_./ _)" [0, 0, 10] 10) "_UNION_less" :: "'a => 'a => 'b set => 'b set" ("(3\_<_./ _)" [0, 0, 10] 10) "_INTER_le" :: "'a => 'a => 'b set => 'b set" ("(3\_\_./ _)" [0, 0, 10] 10) "_INTER_less" :: "'a => 'a => 'b set => 'b set" ("(3\_<_./ _)" [0, 0, 10] 10) translations "\i\n. A" \ "\i\{..n}. A" "\i "\i\{..i\n. A" \ "\i\{..n}. 
A" "\i "\i\{..Various equivalences\ lemma (in ord) lessThan_iff [iff]: "(i \ lessThan k) = (i greaterThan k) = (k atLeast k) = (k<=i)" by (simp add: atLeast_def) lemma Compl_atLeast [simp]: "!!k:: 'a::linorder. -atLeast k = lessThan k" by (auto simp add: lessThan_def atLeast_def) lemma (in ord) atMost_iff [iff]: "(i \ atMost k) = (i<=k)" by (simp add: atMost_def) lemma atMost_Int_atLeast: "!!n:: 'a::order. atMost n Int atLeast n = {n}" by (blast intro: order_antisym) lemma (in linorder) lessThan_Int_lessThan: "{ a <..} \ { b <..} = { max a b <..}" by auto lemma (in linorder) greaterThan_Int_greaterThan: "{..< a} \ {..< b} = {..< min a b}" by auto subsection \Logical Equivalences for Set Inclusion and Equality\ lemma atLeast_empty_triv [simp]: "{{}..} = UNIV" by auto lemma atMost_UNIV_triv [simp]: "{..UNIV} = UNIV" by auto lemma atLeast_subset_iff [iff]: "(atLeast x \ atLeast y) = (y \ (x::'a::preorder))" by (blast intro: order_trans) lemma atLeast_eq_iff [iff]: "(atLeast x = atLeast y) = (x = (y::'a::order))" by (blast intro: order_antisym order_trans) lemma greaterThan_subset_iff [iff]: "(greaterThan x \ greaterThan y) = (y \ (x::'a::linorder))" unfolding greaterThan_def by (auto simp: linorder_not_less [symmetric]) lemma greaterThan_eq_iff [iff]: "(greaterThan x = greaterThan y) = (x = (y::'a::linorder))" by (auto simp: elim!: equalityE) lemma atMost_subset_iff [iff]: "(atMost x \ atMost y) = (x \ (y::'a::preorder))" by (blast intro: order_trans) lemma atMost_eq_iff [iff]: "(atMost x = atMost y) = (x = (y::'a::order))" by (blast intro: order_antisym order_trans) lemma lessThan_subset_iff [iff]: "(lessThan x \ lessThan y) = (x \ (y::'a::linorder))" unfolding lessThan_def by (auto simp: linorder_not_less [symmetric]) lemma lessThan_eq_iff [iff]: "(lessThan x = lessThan y) = (x = (y::'a::linorder))" by (auto simp: elim!: equalityE) lemma lessThan_strict_subset_iff: fixes m n :: "'a::linorder" shows "{.. m < n" by (metis leD lessThan_subset_iff linorder_linear not_less_iff_gr_or_eq psubset_eq) lemma (in linorder) Ici_subset_Ioi_iff: "{a ..} \ {b <..} \ b < a" by auto lemma (in linorder) Iic_subset_Iio_iff: "{.. a} \ {..< b} \ a < b" by auto lemma (in preorder) Ioi_le_Ico: "{a <..} \ {a ..}" by (auto intro: less_imp_le) subsection \Two-sided intervals\ context ord begin lemma greaterThanLessThan_iff [simp]: "(i \ {l<.. i < u)" by (simp add: greaterThanLessThan_def) lemma atLeastLessThan_iff [simp]: "(i \ {l.. i \ i < u)" by (simp add: atLeastLessThan_def) lemma greaterThanAtMost_iff [simp]: "(i \ {l<..u}) = (l < i \ i \ u)" by (simp add: greaterThanAtMost_def) lemma atLeastAtMost_iff [simp]: "(i \ {l..u}) = (l \ i \ i \ u)" by (simp add: atLeastAtMost_def) text \The above four lemmas could be declared as iffs. Unfortunately this breaks many proofs. Since it only helps blast, it is better to leave them alone.\ lemma greaterThanLessThan_eq: "{ a <..< b} = { a <..} \ {..< b }" by auto lemma (in order) atLeastLessThan_eq_atLeastAtMost_diff: "{a..Emptyness, singletons, subset\ context preorder begin lemma atLeastatMost_empty_iff[simp]: "{a..b} = {} \ (\ a \ b)" by auto (blast intro: order_trans) lemma atLeastatMost_empty_iff2[simp]: "{} = {a..b} \ (\ a \ b)" by auto (blast intro: order_trans) lemma atLeastLessThan_empty_iff[simp]: "{a.. (\ a < b)" by auto (blast intro: le_less_trans) lemma atLeastLessThan_empty_iff2[simp]: "{} = {a.. 
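text \<open>With the membership rules above, concrete interval facts are a single \<open>simp\<close> step, e.g.:\<close>

lemma shows "(3::nat) \<in> {2..<5}"
  and "(5::nat) \<notin> {2..<5}"
  and "(2::nat) \<in> {..<7}"
  by simp_all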
(\ a < b)" by auto (blast intro: le_less_trans) lemma greaterThanAtMost_empty_iff[simp]: "{k<..l} = {} \ \ k < l" by auto (blast intro: less_le_trans) lemma greaterThanAtMost_empty_iff2[simp]: "{} = {k<..l} \ \ k < l" by auto (blast intro: less_le_trans) lemma atLeastatMost_subset_iff[simp]: "{a..b} \ {c..d} \ (\ a \ b) \ c \ a \ b \ d" unfolding atLeastAtMost_def atLeast_def atMost_def by (blast intro: order_trans) lemma atLeastatMost_psubset_iff: "{a..b} < {c..d} \ ((\ a \ b) \ c \ a \ b \ d \ (c < a \ b < d)) \ c \ d" by(simp add: psubset_eq set_eq_iff less_le_not_le)(blast intro: order_trans) lemma atLeastAtMost_subseteq_atLeastLessThan_iff: "{a..b} \ {c ..< d} \ (a \ b \ c \ a \ b < d)" by auto (blast intro: local.order_trans local.le_less_trans elim: )+ lemma Icc_subset_Ici_iff[simp]: "{l..h} \ {l'..} = (\ l\h \ l\l')" by(auto simp: subset_eq intro: order_trans) lemma Icc_subset_Iic_iff[simp]: "{l..h} \ {..h'} = (\ l\h \ h\h')" by(auto simp: subset_eq intro: order_trans) lemma not_Ici_eq_empty[simp]: "{l..} \ {}" by(auto simp: set_eq_iff) lemma not_Iic_eq_empty[simp]: "{..h} \ {}" by(auto simp: set_eq_iff) lemmas not_empty_eq_Ici_eq_empty[simp] = not_Ici_eq_empty[symmetric] lemmas not_empty_eq_Iic_eq_empty[simp] = not_Iic_eq_empty[symmetric] end context order begin lemma atLeastatMost_empty[simp]: "b < a \ {a..b} = {}" by(auto simp: atLeastAtMost_def atLeast_def atMost_def) lemma atLeastLessThan_empty[simp]: "b \ a \ {a.. k ==> {k<..l} = {}" by(auto simp:greaterThanAtMost_def greaterThan_def atMost_def) lemma greaterThanLessThan_empty[simp]:"l \ k ==> {k<.. {a .. b} = {a}" by simp lemma Icc_eq_Icc[simp]: "{l..h} = {l'..h'} = (l=l' \ h=h' \ \ l\h \ \ l'\h')" by (simp add: order_class.order.eq_iff) (auto intro: order_trans) lemma atLeastAtMost_singleton_iff[simp]: "{a .. 
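text \<open>In particular, reversed bounds produce the empty set; two instances that \<open>simp\<close> should dispose of via the emptiness rules just proved:\<close>

lemma shows "{5::nat..2} = {}" and "{3::nat..<3} = {}"
  by simp_all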
b} = {c} \ a = b \ b = c" proof assume "{a..b} = {c}" hence *: "\ (\ a \ b)" unfolding atLeastatMost_empty_iff[symmetric] by simp with \{a..b} = {c}\ have "c \ a \ b \ c" by auto with * show "a = b \ b = c" by auto qed simp end context no_top begin (* also holds for no_bot but no_top should suffice *) lemma not_UNIV_le_Icc[simp]: "\ UNIV \ {l..h}" using gt_ex[of h] by(auto simp: subset_eq less_le_not_le) lemma not_UNIV_le_Iic[simp]: "\ UNIV \ {..h}" using gt_ex[of h] by(auto simp: subset_eq less_le_not_le) lemma not_Ici_le_Icc[simp]: "\ {l..} \ {l'..h'}" using gt_ex[of h'] by(auto simp: subset_eq less_le)(blast dest:antisym_conv intro: order_trans) lemma not_Ici_le_Iic[simp]: "\ {l..} \ {..h'}" using gt_ex[of h'] by(auto simp: subset_eq less_le)(blast dest:antisym_conv intro: order_trans) end context no_bot begin lemma not_UNIV_le_Ici[simp]: "\ UNIV \ {l..}" using lt_ex[of l] by(auto simp: subset_eq less_le_not_le) lemma not_Iic_le_Icc[simp]: "\ {..h} \ {l'..h'}" using lt_ex[of l'] by(auto simp: subset_eq less_le)(blast dest:antisym_conv intro: order_trans) lemma not_Iic_le_Ici[simp]: "\ {..h} \ {l'..}" using lt_ex[of l'] by(auto simp: subset_eq less_le)(blast dest:antisym_conv intro: order_trans) end context no_top begin (* also holds for no_bot but no_top should suffice *) lemma not_UNIV_eq_Icc[simp]: "\ UNIV = {l'..h'}" using gt_ex[of h'] by(auto simp: set_eq_iff less_le_not_le) lemmas not_Icc_eq_UNIV[simp] = not_UNIV_eq_Icc[symmetric] lemma not_UNIV_eq_Iic[simp]: "\ UNIV = {..h'}" using gt_ex[of h'] by(auto simp: set_eq_iff less_le_not_le) lemmas not_Iic_eq_UNIV[simp] = not_UNIV_eq_Iic[symmetric] lemma not_Icc_eq_Ici[simp]: "\ {l..h} = {l'..}" unfolding atLeastAtMost_def using not_Ici_le_Iic[of l'] by blast lemmas not_Ici_eq_Icc[simp] = not_Icc_eq_Ici[symmetric] (* also holds for no_bot but no_top should suffice *) lemma not_Iic_eq_Ici[simp]: "\ {..h} = {l'..}" using not_Ici_le_Iic[of l' h] by blast lemmas not_Ici_eq_Iic[simp] = not_Iic_eq_Ici[symmetric] end context no_bot begin lemma not_UNIV_eq_Ici[simp]: "\ UNIV = {l'..}" using lt_ex[of l'] by(auto simp: set_eq_iff less_le_not_le) lemmas not_Ici_eq_UNIV[simp] = not_UNIV_eq_Ici[symmetric] lemma not_Icc_eq_Iic[simp]: "\ {l..h} = {..h'}" unfolding atLeastAtMost_def using not_Iic_le_Ici[of h'] by blast lemmas not_Iic_eq_Icc[simp] = not_Icc_eq_Iic[symmetric] end context dense_linorder begin lemma greaterThanLessThan_empty_iff[simp]: "{ a <..< b } = {} \ b \ a" using dense[of a b] by (cases "a < b") auto lemma greaterThanLessThan_empty_iff2[simp]: "{} = { a <..< b } \ b \ a" using dense[of a b] by (cases "a < b") auto lemma atLeastLessThan_subseteq_atLeastAtMost_iff: "{a ..< b} \ { c .. d } \ (a < b \ c \ a \ b \ d)" using dense[of "max a d" "b"] by (force simp: subset_eq Ball_def not_less[symmetric]) lemma greaterThanAtMost_subseteq_atLeastAtMost_iff: "{a <.. b} \ { c .. d } \ (a < b \ c \ a \ b \ d)" using dense[of "a" "min c b"] by (force simp: subset_eq Ball_def not_less[symmetric]) lemma greaterThanLessThan_subseteq_atLeastAtMost_iff: "{a <..< b} \ { c .. d } \ (a < b \ c \ a \ b \ d)" using dense[of "a" "min c b"] dense[of "max a d" "b"] by (force simp: subset_eq Ball_def not_less[symmetric]) lemma greaterThanLessThan_subseteq_greaterThanLessThan: "{a <..< b} \ {c <..< d} \ (a < b \ a \ c \ b \ d)" using dense[of "a" "min c b"] dense[of "max a d" "b"] by (force simp: subset_eq Ball_def not_less[symmetric]) lemma greaterThanAtMost_subseteq_atLeastLessThan_iff: "{a <.. 
b} \ { c ..< d } \ (a < b \ c \ a \ b < d)" using dense[of "a" "min c b"] by (force simp: subset_eq Ball_def not_less[symmetric]) lemma greaterThanLessThan_subseteq_atLeastLessThan_iff: "{a <..< b} \ { c ..< d } \ (a < b \ c \ a \ b \ d)" using dense[of "a" "min c b"] dense[of "max a d" "b"] by (force simp: subset_eq Ball_def not_less[symmetric]) lemma greaterThanLessThan_subseteq_greaterThanAtMost_iff: "{a <..< b} \ { c <.. d } \ (a < b \ c \ a \ b \ d)" using dense[of "a" "min c b"] dense[of "max a d" "b"] by (force simp: subset_eq Ball_def not_less[symmetric]) end context no_top begin lemma greaterThan_non_empty[simp]: "{x <..} \ {}" using gt_ex[of x] by auto end context no_bot begin lemma lessThan_non_empty[simp]: "{..< x} \ {}" using lt_ex[of x] by auto end lemma (in linorder) atLeastLessThan_subset_iff: "{a.. {c.. b \ a \ c\a \ b\d" apply (auto simp:subset_eq Ball_def not_le) apply(frule_tac x=a in spec) apply(erule_tac x=d in allE) apply (auto simp: ) done lemma atLeastLessThan_inj: fixes a b c d :: "'a::linorder" assumes eq: "{a ..< b} = {c ..< d}" and "a < b" "c < d" shows "a = c" "b = d" using assms by (metis atLeastLessThan_subset_iff eq less_le_not_le antisym_conv2 subset_refl)+ lemma atLeastLessThan_eq_iff: fixes a b c d :: "'a::linorder" assumes "a < b" "c < d" shows "{a ..< b} = {c ..< d} \ a = c \ b = d" using atLeastLessThan_inj assms by auto lemma (in linorder) Ioc_inj: \{a <.. b} = {c <.. d} \ (b \ a \ d \ c) \ a = c \ b = d\ (is \?P \ ?Q\) proof assume ?Q then show ?P by auto next assume ?P then have \a < x \ x \ b \ c < x \ x \ d\ for x by (simp add: set_eq_iff) from this [of a] this [of b] this [of c] this [of d] show ?Q by auto qed lemma (in order) Iio_Int_singleton: "{.. {x} = (if x < k then {x} else {})" by auto lemma (in linorder) Ioc_subset_iff: "{a<..b} \ {c<..d} \ (b \ a \ c \ a \ b \ d)" by (auto simp: subset_eq Ball_def) (metis less_le not_less) lemma (in order_bot) atLeast_eq_UNIV_iff: "{x..} = UNIV \ x = bot" by (auto simp: set_eq_iff intro: le_bot) lemma (in order_top) atMost_eq_UNIV_iff: "{..x} = UNIV \ x = top" by (auto simp: set_eq_iff intro: top_le) lemma (in bounded_lattice) atLeastAtMost_eq_UNIV_iff: "{x..y} = UNIV \ (x = bot \ y = top)" by (auto simp: set_eq_iff intro: top_le le_bot) lemma Iio_eq_empty_iff: "{..< n::'a::{linorder, order_bot}} = {} \ n = bot" by (auto simp: set_eq_iff not_less le_bot) lemma lessThan_empty_iff: "{..< n::nat} = {} \ n = 0" by (simp add: Iio_eq_empty_iff bot_nat_def) lemma mono_image_least: assumes f_mono: "mono f" and f_img: "f ` {m ..< n} = {m' ..< n'}" "m < n" shows "f m = m'" proof - from f_img have "{m' ..< n'} \ {}" by (metis atLeastLessThan_empty_iff image_is_empty) with f_img have "m' \ f ` {m ..< n}" by auto then obtain k where "f k = m'" "m \ k" by auto moreover have "m' \ f m" using f_img by auto ultimately show "f m = m'" using f_mono by (auto elim: monoE[where x=m and y=k]) qed subsection \Infinite intervals\ context dense_linorder begin lemma infinite_Ioo: assumes "a < b" shows "\ finite {a<.. {}" using \a < b\ by auto ultimately have "a < Max {a <..< b}" "Max {a <..< b} < b" using Max_in[of "{a <..< b}"] by auto then obtain x where "Max {a <..< b} < x" "x < b" using dense[of "Max {a<.. {a <..< b}" using \a < Max {a <..< b}\ by auto then have "x \ Max {a <..< b}" using fin by auto with \Max {a <..< b} < x\ show False by auto qed lemma infinite_Icc: "a < b \ \ finite {a .. 
b}" using greaterThanLessThan_subseteq_atLeastAtMost_iff[of a b a b] infinite_Ioo[of a b] by (auto dest: finite_subset) lemma infinite_Ico: "a < b \ \ finite {a ..< b}" using greaterThanLessThan_subseteq_atLeastLessThan_iff[of a b a b] infinite_Ioo[of a b] by (auto dest: finite_subset) lemma infinite_Ioc: "a < b \ \ finite {a <.. b}" using greaterThanLessThan_subseteq_greaterThanAtMost_iff[of a b a b] infinite_Ioo[of a b] by (auto dest: finite_subset) lemma infinite_Ioo_iff [simp]: "infinite {a<.. a < b" using not_less_iff_gr_or_eq by (fastforce simp: infinite_Ioo) lemma infinite_Icc_iff [simp]: "infinite {a .. b} \ a < b" using not_less_iff_gr_or_eq by (fastforce simp: infinite_Icc) lemma infinite_Ico_iff [simp]: "infinite {a.. a < b" using not_less_iff_gr_or_eq by (fastforce simp: infinite_Ico) lemma infinite_Ioc_iff [simp]: "infinite {a<..b} \ a < b" using not_less_iff_gr_or_eq by (fastforce simp: infinite_Ioc) end lemma infinite_Iio: "\ finite {..< a :: 'a :: {no_bot, linorder}}" proof assume "finite {..< a}" then have *: "\x. x < a \ Min {..< a} \ x" by auto obtain x where "x < a" using lt_ex by auto obtain y where "y < Min {..< a}" using lt_ex by auto also have "Min {..< a} \ x" using \x < a\ by fact also note \x < a\ finally have "Min {..< a} \ y" by fact with \y < Min {..< a}\ show False by auto qed lemma infinite_Iic: "\ finite {.. a :: 'a :: {no_bot, linorder}}" using infinite_Iio[of a] finite_subset[of "{..< a}" "{.. a}"] by (auto simp: subset_eq less_imp_le) lemma infinite_Ioi: "\ finite {a :: 'a :: {no_top, linorder} <..}" proof assume "finite {a <..}" then have *: "\x. a < x \ x \ Max {a <..}" by auto obtain y where "Max {a <..} < y" using gt_ex by auto obtain x where x: "a < x" using gt_ex by auto also from x have "x \ Max {a <..}" by fact also note \Max {a <..} < y\ finally have "y \ Max { a <..}" by fact with \Max {a <..} < y\ show False by auto qed lemma infinite_Ici: "\ finite {a :: 'a :: {no_top, linorder} ..}" using infinite_Ioi[of a] finite_subset[of "{a <..}" "{a ..}"] by (auto simp: subset_eq less_imp_le) subsubsection \Intersection\ context linorder begin lemma Int_atLeastAtMost[simp]: "{a..b} Int {c..d} = {max a c .. min b d}" by auto lemma Int_atLeastAtMostR1[simp]: "{..b} Int {c..d} = {c .. min b d}" by auto lemma Int_atLeastAtMostR2[simp]: "{a..} Int {c..d} = {max a c .. d}" by auto lemma Int_atLeastAtMostL1[simp]: "{a..b} Int {..d} = {a .. min b d}" by auto lemma Int_atLeastAtMostL2[simp]: "{a..b} Int {c..} = {max a c .. b}" by auto lemma Int_atLeastLessThan[simp]: "{a.. {..b} = {.. min a b}" by (auto simp: min_def) lemma Ioc_disjoint: "{a<..b} \ {c<..d} = {} \ b \ a \ d \ c \ b \ c \ d \ a" by auto end context complete_lattice begin lemma shows Sup_atLeast[simp]: "Sup {x ..} = top" and Sup_greaterThanAtLeast[simp]: "x < top \ Sup {x <..} = top" and Sup_atMost[simp]: "Sup {.. y} = y" and Sup_atLeastAtMost[simp]: "x \ y \ Sup { x .. y} = y" and Sup_greaterThanAtMost[simp]: "x < y \ Sup { x <.. y} = y" by (auto intro!: Sup_eqI) lemma shows Inf_atMost[simp]: "Inf {.. x} = bot" and Inf_atMostLessThan[simp]: "top < x \ Inf {..< x} = bot" and Inf_atLeast[simp]: "Inf {x ..} = x" and Inf_atLeastAtMost[simp]: "x \ y \ Inf { x .. 
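text \<open>So, for example, two overlapping closed intervals intersect in a closed interval; after \<open>Int_atLeastAtMost\<close>, \<open>simp\<close> only has to evaluate \<open>max\<close> and \<open>min\<close>:\<close>

lemma "{1::nat..5} \<inter> {3..8} = {3..5}"
  by simp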
y} = x" and Inf_atLeastLessThan[simp]: "x < y \ Inf { x ..< y} = x" by (auto intro!: Inf_eqI) end lemma fixes x y :: "'a :: {complete_lattice, dense_linorder}" shows Sup_lessThan[simp]: "Sup {..< y} = y" and Sup_atLeastLessThan[simp]: "x < y \ Sup { x ..< y} = y" and Sup_greaterThanLessThan[simp]: "x < y \ Sup { x <..< y} = y" and Inf_greaterThan[simp]: "Inf {x <..} = x" and Inf_greaterThanAtMost[simp]: "x < y \ Inf { x <.. y} = x" and Inf_greaterThanLessThan[simp]: "x < y \ Inf { x <..< y} = x" by (auto intro!: Inf_eqI Sup_eqI intro: dense_le dense_le_bounded dense_ge dense_ge_bounded) subsection \Intervals of natural numbers\ subsubsection \The Constant \<^term>\lessThan\\ lemma lessThan_0 [simp]: "lessThan (0::nat) = {}" by (simp add: lessThan_def) lemma lessThan_Suc: "lessThan (Suc k) = insert k (lessThan k)" by (simp add: lessThan_def less_Suc_eq, blast) text \The following proof is convenient in induction proofs where new elements get indices at the beginning. So it is used to transform \<^term>\{.. to \<^term>\0::nat\ and \<^term>\{..< n}\.\ lemma zero_notin_Suc_image [simp]: "0 \ Suc ` A" by auto lemma lessThan_Suc_eq_insert_0: "{..m::nat. lessThan m) = UNIV" by blast subsubsection \The Constant \<^term>\greaterThan\\ lemma greaterThan_0: "greaterThan 0 = range Suc" unfolding greaterThan_def by (blast dest: gr0_conv_Suc [THEN iffD1]) lemma greaterThan_Suc: "greaterThan (Suc k) = greaterThan k - {Suc k}" unfolding greaterThan_def by (auto elim: linorder_neqE) lemma INT_greaterThan_UNIV: "(\m::nat. greaterThan m) = {}" by blast subsubsection \The Constant \<^term>\atLeast\\ lemma atLeast_0 [simp]: "atLeast (0::nat) = UNIV" by (unfold atLeast_def UNIV_def, simp) lemma atLeast_Suc: "atLeast (Suc k) = atLeast k - {k}" unfolding atLeast_def by (auto simp: order_le_less Suc_le_eq) lemma atLeast_Suc_greaterThan: "atLeast (Suc k) = greaterThan k" by (auto simp add: greaterThan_def atLeast_def less_Suc_eq_le) lemma UN_atLeast_UNIV: "(\m::nat. atLeast m) = UNIV" by blast subsubsection \The Constant \<^term>\atMost\\ lemma atMost_0 [simp]: "atMost (0::nat) = {0}" by (simp add: atMost_def) lemma atMost_Suc: "atMost (Suc k) = insert (Suc k) (atMost k)" unfolding atMost_def by (auto simp add: less_Suc_eq order_le_less) lemma UN_atMost_UNIV: "(\m::nat. atMost m) = UNIV" by blast subsubsection \The Constant \<^term>\atLeastLessThan\\ text\The orientation of the following 2 rules is tricky. The lhs is defined in terms of the rhs. Hence the chosen orientation makes sense in this theory --- the reverse orientation complicates proofs (eg nontermination). But outside, when the definition of the lhs is rarely used, the opposite orientation seems preferable because it reduces a specific concept to a more general one.\ lemma atLeast0LessThan [code_abbrev]: "{0::nat..The Constant \<^term>\atLeastAtMost\\ lemma Icc_eq_insert_lb_nat: "m \ n \ {m..n} = insert m {Suc m..n}" by auto lemma atLeast0_atMost_Suc: "{0..Suc n} = insert (Suc n) {0..n}" by (simp add: atLeast0AtMost atMost_Suc) lemma atLeast0_atMost_Suc_eq_insert_0: "{0..Suc n} = insert 0 (Suc ` {0..n})" by (simp add: atLeast0AtMost atMost_Suc_eq_insert_0) subsubsection \Intervals of nats with \<^term>\Suc\\ text\Not a simprule because the RHS is too messy.\ lemma atLeastLessThanSuc: "{m.. n then insert n {m.. 
Suc n \ {m..Suc n} = insert (Suc n) {m..n}" by auto lemma atLeastAtMost_insertL: "m \ n \ insert m {Suc m..n} = {m ..n}" by auto text \The analogous result is useful on \<^typ>\int\:\ (* here, because we don't have an own int section *) lemma atLeastAtMostPlus1_int_conv: "m \ 1+n \ {m..1+n} = insert (1+n) {m..n::int}" by (auto intro: set_eqI) lemma atLeastLessThan_add_Un: "i \ j \ {i.. {j..Intervals and numerals\ lemma lessThan_nat_numeral: \ \Evaluation for specific numerals\ "lessThan (numeral k :: nat) = insert (pred_numeral k) (lessThan (pred_numeral k))" by (simp add: numeral_eq_Suc lessThan_Suc) lemma atMost_nat_numeral: \ \Evaluation for specific numerals\ "atMost (numeral k :: nat) = insert (numeral k) (atMost (pred_numeral k))" by (simp add: numeral_eq_Suc atMost_Suc) lemma atLeastLessThan_nat_numeral: \ \Evaluation for specific numerals\ "atLeastLessThan m (numeral k :: nat) = (if m \ (pred_numeral k) then insert (pred_numeral k) (atLeastLessThan m (pred_numeral k)) else {})" by (simp add: numeral_eq_Suc atLeastLessThanSuc) subsubsection \Image\ context linordered_semidom begin lemma image_add_atLeast[simp]: "plus k ` {i..} = {k + i..}" proof - have "n = k + (n - k)" if "i + k \ n" for n proof - have "n = (n - (k + i)) + (k + i)" using that by (metis add_commute le_add_diff_inverse) then show "n = k + (n - k)" by (metis local.add_diff_cancel_left' add_assoc add_commute) qed then show ?thesis by (fastforce simp: add_le_imp_le_diff add.commute) qed lemma image_add_atLeastAtMost [simp]: "plus k ` {i..j} = {i + k..j + k}" (is "?A = ?B") proof show "?A \ ?B" by (auto simp add: ac_simps) next show "?B \ ?A" proof fix n assume "n \ ?B" then have "i \ n - k" by (simp add: add_le_imp_le_diff) have "n = n - k + k" proof - from \n \ ?B\ have "n = n - (i + k) + (i + k)" by simp also have "\ = n - k - i + i + k" by (simp add: algebra_simps) also have "\ = n - k + k" using \i \ n - k\ by simp finally show ?thesis . qed moreover have "n - k \ {i..j}" using \n \ ?B\ by (auto simp: add_le_imp_le_diff add_le_add_imp_diff_le) ultimately show "n \ ?A" by (simp add: ac_simps) qed qed lemma image_add_atLeastAtMost' [simp]: "(\n. n + k) ` {i..j} = {i + k..j + k}" by (simp add: add.commute [of _ k]) lemma image_add_atLeastLessThan [simp]: "plus k ` {i..n. n + k) ` {i.. uminus ` {x<..}" by (rule imageI) (simp add: *) thus "y \ uminus ` {x<..}" by simp next fix y assume "y \ -x" have "- (-y) \ uminus ` {x..}" by (rule imageI) (insert \y \ -x\[THEN le_imp_neg_le], simp) thus "y \ uminus ` {x..}" by simp qed simp_all lemma fixes x :: 'a shows image_uminus_lessThan[simp]: "uminus ` {.. = {c - b<..c - a}" by simp finally show ?thesis by simp qed lemma image_minus_const_greaterThanAtMost[simp]: fixes a b c::"'a::linordered_idom" shows "(-) c ` {a<..b} = {c - b.. = {c - b.. = {..c - a}" by simp finally show ?thesis by simp qed lemma image_minus_const_AtMost[simp]: fixes b c::"'a::linordered_idom" shows "(-) c ` {..b} = {c - b..}" proof - have "(-) c ` {..b} = (+) c ` uminus ` {..b}" unfolding image_image by simp also have "\ = {c - b..}" by simp finally show ?thesis by simp qed lemma image_minus_const_atLeastAtMost' [simp]: "(\t. 
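text \<open>Concretely, translating an interval shifts both bounds; an instance of \<open>image_add_atLeastAtMost\<close> that \<open>simp\<close> should finish:\<close>

lemma "plus (3::nat) ` {1..4} = {4..7}"
  by simp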
t-d)`{a..b} = {a-d..b-d}" for d::"'a::linordered_idom" by (metis (no_types, lifting) diff_conv_add_uminus image_add_atLeastAtMost' image_cong) context linordered_field begin lemma image_mult_atLeastAtMost [simp]: "((*) d ` {a..b}) = {d*a..d*b}" if "d>0" using that by (auto simp: field_simps mult_le_cancel_right intro: rev_image_eqI [where x="x/d" for x]) lemma image_divide_atLeastAtMost [simp]: "((\c. c / d) ` {a..b}) = {a/d..b/d}" if "d>0" proof - from that have "inverse d > 0" by simp with image_mult_atLeastAtMost [of "inverse d" a b] have "(*) (inverse d) ` {a..b} = {inverse d * a..inverse d * b}" by blast moreover have "(*) (inverse d) = (\c. c / d)" by (simp add: fun_eq_iff field_simps) ultimately show ?thesis by simp qed lemma image_mult_atLeastAtMost_if: "(*) c ` {x .. y} = (if c > 0 then {c * x .. c * y} else if x \ y then {c * y .. c * x} else {})" proof (cases "c = 0 \ x > y") case True then show ?thesis by auto next case False then have "x \ y" by auto from False consider "c < 0"| "c > 0" by (auto simp add: neq_iff) then show ?thesis proof cases case 1 have "(*) c ` {x..y} = {c * y..c * x}" proof (rule set_eqI) fix d from 1 have "inj (\z. z / c)" by (auto intro: injI) then have "d \ (*) c ` {x..y} \ d / c \ (\z. z div c) ` (*) c ` {x..y}" by (subst inj_image_mem_iff) simp_all also have "\ \ d / c \ {x..y}" using 1 by (simp add: image_image) also have "\ \ d \ {c * y..c * x}" by (auto simp add: field_simps 1) finally show "d \ (*) c ` {x..y} \ d \ {c * y..c * x}" . qed with \x \ y\ show ?thesis by auto qed (simp add: mult_left_mono_neg) qed lemma image_mult_atLeastAtMost_if': "(\x. x * c) ` {x..y} = (if x \ y then if c > 0 then {x * c .. y * c} else {y * c .. x * c} else {})" using image_mult_atLeastAtMost_if [of c x y] by (auto simp add: ac_simps) lemma image_affinity_atLeastAtMost: "((\x. m * x + c) ` {a..b}) = (if {a..b} = {} then {} else if 0 \ m then {m * a + c .. m * b + c} else {m * b + c .. m * a + c})" proof - have *: "(\x. m * x + c) = ((\x. x + c) \ (*) m)" by (simp add: fun_eq_iff) show ?thesis by (simp only: * image_comp [symmetric] image_mult_atLeastAtMost_if) (auto simp add: mult_le_cancel_left) qed lemma image_affinity_atLeastAtMost_diff: "((\x. m*x - c) ` {a..b}) = (if {a..b}={} then {} else if 0 \ m then {m*a - c .. m*b - c} else {m*b - c .. m*a - c})" using image_affinity_atLeastAtMost [of m "-c" a b] by simp lemma image_affinity_atLeastAtMost_div: "((\x. x/m + c) ` {a..b}) = (if {a..b}={} then {} else if 0 \ m then {a/m + c .. b/m + c} else {b/m + c .. a/m + c})" using image_affinity_atLeastAtMost [of "inverse m" c a b] by (simp add: field_class.field_divide_inverse algebra_simps inverse_eq_divide) lemma image_affinity_atLeastAtMost_div_diff: "((\x. x/m - c) ` {a..b}) = (if {a..b}={} then {} else if 0 \ m then {a/m - c .. b/m - c} else {b/m - c .. a/m - c})" using image_affinity_atLeastAtMost_diff [of "inverse m" c a b] by (simp add: field_class.field_divide_inverse algebra_simps inverse_eq_divide) end lemma atLeast1_lessThan_eq_remove0: "{Suc 0..x. x + (l::int)) ` {0..i. i - c) ` {x ..< y} = (if c < y then {x - c ..< y - c} else if x < y then {0} else {})" (is "_ = ?right") proof safe fix a assume a: "a \ ?right" show "a \ (\i. 
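text \<open>Likewise, scaling by a positive constant scales both bounds; here the side condition \<open>0 < 2\<close> of \<open>image_mult_atLeastAtMost\<close> should be discharged by \<open>simp\<close> itself:\<close>

lemma "(*) (2::'a::linordered_field) ` {a..b} = {2 * a..2 * b}"
  by simp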
i - c) ` {x ..< y}" proof cases assume "c < y" with a show ?thesis by (auto intro!: image_eqI[of _ _ "a + c"]) next assume "\ c < y" with a show ?thesis by (auto intro!: image_eqI[of _ _ x] split: if_split_asm) qed qed auto lemma image_int_atLeastLessThan: "int ` {a..Finiteness\ lemma finite_lessThan [iff]: fixes k :: nat shows "finite {..A bounded set of natural numbers is finite.\ lemma bounded_nat_set_is_finite: "(\i\N. i < (n::nat)) \ finite N" by (rule finite_subset [OF _ finite_lessThan]) auto text \A set of natural numbers is finite iff it is bounded.\ lemma finite_nat_set_iff_bounded: "finite(N::nat set) = (\m. \n\N. n?F\, simplified less_Suc_eq_le[symmetric]] by blast next assume ?B show ?F using \?B\ by(blast intro:bounded_nat_set_is_finite) qed lemma finite_nat_set_iff_bounded_le: "finite(N::nat set) = (\m. \n\N. n\m)" unfolding finite_nat_set_iff_bounded by (blast dest:less_imp_le_nat le_imp_less_Suc) lemma finite_less_ub: "!!f::nat=>nat. (!!n. n \ f n) ==> finite {n. f n \ u}" by (rule_tac B="{..u}" in finite_subset, auto intro: order_trans) lemma bounded_Max_nat: fixes P :: "nat \ bool" assumes x: "P x" and M: "\x. P x \ x \ M" obtains m where "P m" "\x. P x \ x \ m" proof - have "finite {x. P x}" using M finite_nat_set_iff_bounded_le by auto then have "Max {x. P x} \ {x. P x}" using Max_in x by auto then show ?thesis by (simp add: \finite {x. P x}\ that) qed text\Any subset of an interval of natural numbers the size of the subset is exactly that interval.\ lemma subset_card_intvl_is_intvl: assumes "A \ {k.. A" by auto with insert have "A \ {k..Proving Inclusions and Equalities between Unions\ lemma UN_le_eq_Un0: "(\i\n::nat. M i) = (\i\{1..n}. M i) \ M 0" (is "?A = ?B") proof show "?A \ ?B" proof fix x assume "x \ ?A" then obtain i where i: "i\n" "x \ M i" by auto show "x \ ?B" proof(cases i) case 0 with i show ?thesis by simp next case (Suc j) with i show ?thesis by auto qed qed next show "?B \ ?A" by fastforce qed lemma UN_le_add_shift: "(\i\n::nat. M(i+k)) = (\i\{k..n+k}. M i)" (is "?A = ?B") proof show "?A \ ?B" by fastforce next show "?B \ ?A" proof fix x assume "x \ ?B" then obtain i where i: "i \ {k..n+k}" "x \ M(i)" by auto hence "i-k\n \ x \ M((i-k)+k)" by auto thus "x \ ?A" by blast qed qed lemma UN_le_add_shift_strict: "(\ii\{k.. ?A" proof fix x assume "x \ ?B" then obtain i where i: "i \ {k.. M(i)" by auto then have "i - k < n \ x \ M((i-k) + k)" by auto then show "x \ ?A" using UN_le_add_shift by blast qed qed (fastforce) lemma UN_UN_finite_eq: "(\n::nat. \i\{0..n. A n)" by (auto simp add: atLeast0LessThan) lemma UN_finite_subset: "(\n::nat. (\i\{0.. C) \ (\n. A n) \ C" by (subst UN_UN_finite_eq [symmetric]) blast lemma UN_finite2_subset: assumes "\n::nat. (\i\{0.. (\i\{0..n. A n) \ (\n. B n)" proof (rule UN_finite_subset, rule) fix n and a from assms have "(\i\{0.. (\i\{0.. (\i\{0.. (\i\{0.. (\i. B i)" by (auto simp add: UN_UN_finite_eq) qed lemma UN_finite2_eq: "(\n::nat. (\i\{0..i\{0.. (\n. A n) = (\n. B n)" apply (rule subset_antisym [OF UN_finite_subset UN_finite2_subset]) apply auto apply (force simp add: atLeastLessThan_add_Un [of 0])+ done subsubsection \Cardinality\ lemma card_lessThan [simp]: "card {..x. x + l) ` {.. {0.. {0..n}" shows "finite N" using assms finite_atLeastAtMost by (rule finite_subset) lemma ex_bij_betw_nat_finite: "finite M \ \h. bij_betw h {0.. \h. bij_betw h M {0.. finite B \ card A = card B \ \h. 
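text \<open>The cardinality rules reduce counting to arithmetic, e.g.:\<close>

lemma "card {3::nat..<10} = 7"
  by simp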
bij_betw h A B" apply(drule ex_bij_betw_finite_nat) apply(drule ex_bij_betw_nat_finite) apply(auto intro!:bij_betw_trans) done lemma ex_bij_betw_nat_finite_1: "finite M \ \h. bij_betw h {1 .. card M} M" by (rule finite_same_card_bij) auto lemma bij_betw_iff_card: assumes "finite A" "finite B" shows "(\f. bij_betw f A B) \ (card A = card B)" proof assume "card A = card B" moreover obtain f where "bij_betw f A {0 ..< card A}" using assms ex_bij_betw_finite_nat by blast moreover obtain g where "bij_betw g {0 ..< card B} B" using assms ex_bij_betw_nat_finite by blast ultimately have "bij_betw (g \ f) A B" by (auto simp: bij_betw_trans) thus "(\f. bij_betw f A B)" by blast qed (auto simp: bij_betw_same_card) lemma subset_eq_atLeast0_lessThan_card: fixes n :: nat assumes "N \ {0.. n" proof - from assms finite_lessThan have "card N \ card {0..Relational version of @{thm [source] card_inj_on_le}:\ lemma card_le_if_inj_on_rel: assumes "finite B" "\a. a \ A \ \b. b\B \ r a b" "\a1 a2 b. \ a1 \ A; a2 \ A; b \ B; r a1 b; r a2 b \ \ a1 = a2" shows "card A \ card B" proof - let ?P = "\a b. b \ B \ r a b" let ?f = "\a. SOME b. ?P a b" have 1: "?f ` A \ B" by (auto intro: someI2_ex[OF assms(2)]) have "inj_on ?f A" proof (auto simp: inj_on_def) fix a1 a2 assume asms: "a1 \ A" "a2 \ A" "?f a1 = ?f a2" have 0: "?f a1 \ B" using "1" \a1 \ A\ by blast have 1: "r a1 (?f a1)" using someI_ex[OF assms(2)[OF \a1 \ A\]] by blast have 2: "r a2 (?f a1)" using someI_ex[OF assms(2)[OF \a2 \ A\]] asms(3) by auto show "a1 = a2" using assms(3)[OF asms(1,2) 0 1 2] . qed with 1 show ?thesis using card_inj_on_le[of ?f A B] assms(1) by simp qed lemma inj_on_funpow_least: \<^marker>\contributor \Lars Noschinski\\ \inj_on (\k. (f ^^ k) s) {0.. if \(f ^^ n) s = s\ \\m. 0 < m \ m < n \ (f ^^ m) s \ s\ proof - { fix k l assume A: "k < n" "l < n" "k \ l" "(f ^^ k) s = (f ^^ l) s" define k' l' where "k' = min k l" and "l' = max k l" with A have A': "k' < l'" "(f ^^ k') s = (f ^^ l') s" "l' < n" by (auto simp: min_def max_def) have "s = (f ^^ ((n - l') + l')) s" using that \l' < n\ by simp also have "\ = (f ^^ (n - l')) ((f ^^ l') s)" by (simp add: funpow_add) also have "(f ^^ l') s = (f ^^ k') s" by (simp add: A') also have "(f ^^ (n - l')) \ = (f ^^ (n - l' + k')) s" by (simp add: funpow_add) finally have "(f ^^ (n - l' + k')) s = s" by simp moreover have "n - l' + k' < n" "0 < n - l' + k'"using A' by linarith+ ultimately have False using that(2) by auto } then show ?thesis by (intro inj_onI) auto qed subsection \Intervals of integers\ lemma atLeastLessThanPlusOne_atLeastAtMost_int: "{l..Finiteness\ lemma image_atLeastZeroLessThan_int: "0 \ u ==> {(0::int).. u") case True then show ?thesis by (auto simp: image_atLeastZeroLessThan_int) qed auto lemma finite_atLeastLessThan_int [iff]: "finite {l..Cardinality\ lemma card_atLeastZeroLessThan_int: "card {(0::int).. u") case True then show ?thesis by (auto simp: image_atLeastZeroLessThan_int card_image inj_on_def) qed auto lemma card_atLeastLessThan_int [simp]: "card {l.. k < (i::nat)}" proof - have "{k. P k \ k < i} \ {.. M" shows "card {k \ M. k < Suc i} \ 0" proof - from zero_in_M have "{k \ M. k < Suc i} \ {}" by auto with finite_M_bounded_by_nat show ?thesis by (auto simp add: card_eq_0_iff) qed lemma card_less_Suc2: assumes "0 \ M" shows "card {k. Suc k \ M \ k < i} = card {k \ M. k < Suc i}" proof - have *: "\j \ M; j < Suc i\ \ j - Suc 0 < i \ Suc (j - Suc 0) \ M \ Suc 0 \ j" for j by (cases j) (use assms in auto) show ?thesis proof (rule card_bij_eq) show "inj_on Suc {k. 
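text \<open>Analogously on the integers, where the size of \<open>{l..<u}\<close> is \<open>nat (u - l)\<close>:\<close>

lemma "card {-2..<3::int} = 5"
  by simp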
Suc k \ M \ k < i}" by force show "inj_on (\x. x - Suc 0) {k \ M. k < Suc i}" by (rule inj_on_diff_nat) (use * in blast) qed (use * in auto) qed lemma card_less_Suc: assumes "0 \ M" shows "Suc (card {k. Suc k \ M \ k < i}) = card {k \ M. k < Suc i}" proof - have "Suc (card {k. Suc k \ M \ k < i}) = Suc (card {k. Suc k \ M - {0} \ k < i})" by simp also have "\ = Suc (card {k \ M - {0}. k < Suc i})" apply (subst card_less_Suc2) using assms by auto also have "\ = Suc (card ({k \ M. k < Suc i} - {0}))" by (force intro: arg_cong [where f=card]) also have "\ = card (insert 0 ({k \ M. k < Suc i} - {0}))" by (simp add: card.insert_remove) also have "... = card {k \ M. k < Suc i}" using assms by (force simp add: intro: arg_cong [where f=card]) finally show ?thesis. qed lemma card_le_Suc_Max: "finite S \ card S \ Suc (Max S)" proof (rule classical) assume "finite S" and "\ Suc (Max S) \ card S" then have "Suc (Max S) < card S" by simp - with `finite S` have "S \ {0..Max S}" + with \finite S\ have "S \ {0..Max S}" by auto hence "card S \ card {0..Max S}" by (intro card_mono; auto) thus "card S \ Suc (Max S)" by simp qed subsection \Lemmas useful with the summation operator sum\ text \For examples, see Algebra/poly/UnivPoly2.thy\ subsubsection \Disjoint Unions\ text \Singletons and open intervals\ lemma ivl_disj_un_singleton: "{l::'a::linorder} Un {l<..} = {l..}" "{.. {l} Un {l<.. {l<.. u ==> {l} Un {l<..u} = {l..u}" "(l::'a::linorder) \ u ==> {l..One- and two-sided intervals\ lemma ivl_disj_un_one: "(l::'a::linorder) < u ==> {..l} Un {l<.. u ==> {.. u ==> {..l} Un {l<..u} = {..u}" "(l::'a::linorder) \ u ==> {.. u ==> {l<..u} Un {u<..} = {l<..}" "(l::'a::linorder) < u ==> {l<.. u ==> {l..u} Un {u<..} = {l..}" "(l::'a::linorder) \ u ==> {l..Two- and two-sided intervals\ lemma ivl_disj_un_two: "[| (l::'a::linorder) < m; m \ u |] ==> {l<.. m; m < u |] ==> {l<..m} Un {m<.. m; m \ u |] ==> {l.. m; m < u |] ==> {l..m} Un {m<.. u |] ==> {l<.. m; m \ u |] ==> {l<..m} Un {m<..u} = {l<..u}" "[| (l::'a::linorder) \ m; m \ u |] ==> {l.. m; m \ u |] ==> {l..m} Un {m<..u} = {l..u}" by auto lemma ivl_disj_un_two_touch: "[| (l::'a::linorder) < m; m < u |] ==> {l<..m} Un {m.. m; m < u |] ==> {l..m} Un {m.. u |] ==> {l<..m} Un {m..u} = {l<..u}" "[| (l::'a::linorder) \ m; m \ u |] ==> {l..m} Un {m..u} = {l..u}" by auto lemmas ivl_disj_un = ivl_disj_un_singleton ivl_disj_un_one ivl_disj_un_two ivl_disj_un_two_touch subsubsection \Disjoint Intersections\ text \One- and two-sided intervals\ lemma ivl_disj_int_one: "{..l::'a::order} Int {l<..Two- and two-sided intervals\ lemma ivl_disj_int_two: "{l::'a::order<..Some Differences\ lemma ivl_diff[simp]: "i \ n \ {i..Some Subset Conditions\ lemma ivl_subset [simp]: "({i.. {m.. 
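text \<open>These decompositions are what one typically feeds to rules such as \<open>sum.union_disjoint\<close> when splitting a sum over an interval; a concrete instance (assuming the last case of \<open>ivl_disj_un_two\<close> is its eighth statement):\<close>

lemma "{0::nat..5} \<union> {5<..10} = {0..10}"
  by (rule ivl_disj_un_two(8)) simp_all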
i \ m \ i \ j \ (n::'a::linorder))" using linorder_class.le_less_linear[of i n] apply (auto simp: linorder_not_le) apply (force intro: leI)+ done lemma obtain_subset_with_card_n: assumes "n \ card S" obtains T where "T \ S" "card T = n" "finite T" proof - obtain n' where "card S = n + n'" by (metis assms le_add_diff_inverse) with that show thesis proof (induct n' arbitrary: S) case 0 then show ?case by (cases "finite S") auto next case Suc then show ?case by (simp add: card_Suc_eq) (metis subset_insertI2) qed qed subsection \Generic big monoid operation over intervals\ context semiring_char_0 begin lemma inj_on_of_nat [simp]: "inj_on of_nat N" by rule simp lemma bij_betw_of_nat [simp]: "bij_betw of_nat N A \ of_nat ` N = A" by (simp add: bij_betw_def) end context comm_monoid_set begin lemma atLeastLessThan_reindex: "F g {h m.. h) {m.. h) {m..n}" if "bij_betw h {m..n} {h m..h n}" for m n ::nat proof - from that have "inj_on h {m..n}" and "h ` {m..n} = {h m..h n}" by (simp_all add: bij_betw_def) then show ?thesis using reindex [of h "{m..n}" g] by simp qed lemma atLeastLessThan_shift_bounds: "F g {m + k.. plus k) {m.. plus k) {m..n}" for m n k :: nat using atLeastAtMost_reindex [of "plus k" m n g] by (simp add: ac_simps) lemma atLeast_Suc_lessThan_Suc_shift: "F g {Suc m.. Suc) {m.. Suc) {m..n}" using atLeastAtMost_shift_bounds [of _ _ 1] by (simp add: plus_1_eq_Suc) lemma atLeast_int_lessThan_int_shift: "F g {int m.. int) {m.. int) {m..n}" by (rule atLeastAtMost_reindex) (simp add: image_int_atLeastAtMost) lemma atLeast0_lessThan_Suc: "F g {0..* g n" by (simp add: atLeast0_lessThan_Suc ac_simps) lemma atLeast0_atMost_Suc: "F g {0..Suc n} = F g {0..n} \<^bold>* g (Suc n)" by (simp add: atLeast0_atMost_Suc ac_simps) lemma atLeast0_lessThan_Suc_shift: "F g {0..* F (g \ Suc) {0..* F (g \ Suc) {0..n}" by (simp add: atLeast0_atMost_Suc_eq_insert_0 atLeast_Suc_atMost_Suc_shift) lemma atLeast_Suc_lessThan: "F g {m..* F g {Suc m..* F g {Suc m..n}" if "m \ n" proof - from that have "{m..n} = insert m {Suc m..n}" by auto then show ?thesis by simp qed lemma ivl_cong: "a = c \ b = d \ (\x. c \ x \ x < d \ g x = h x) \ F g {a.. plus m) {0.. n") simp_all lemma atLeastAtMost_shift_0: fixes m n p :: nat assumes "m \ n" shows "F g {m..n} = F (g \ plus m) {0..n - m}" using assms atLeastAtMost_shift_bounds [of g 0 m "n - m"] by simp lemma atLeastLessThan_concat: fixes m n p :: nat shows "m \ n \ n \ p \ F g {m..* F g {n..i. g (m + n - Suc i)) {n..i. g (m + n - i)) {n..m}" by (rule reindex_bij_witness [where i="\i. m + n - i" and j="\i. m + n - i"]) auto lemma atLeastLessThan_rev_at_least_Suc_atMost: "F g {n..i. 
g (m + n - i)) {Suc n..m}" unfolding atLeastLessThan_rev [of g n m] by (cases m) (simp_all add: atLeast_Suc_atMost_Suc_shift atLeastLessThanSuc_atLeastAtMost) end subsection \Summation indexed over intervals\ syntax (ASCII) "_from_to_sum" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(SUM _ = _.._./ _)" [0,0,0,10] 10) "_from_upto_sum" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(SUM _ = _..<_./ _)" [0,0,0,10] 10) "_upt_sum" :: "idt \ 'a \ 'b \ 'b" ("(SUM _<_./ _)" [0,0,10] 10) "_upto_sum" :: "idt \ 'a \ 'b \ 'b" ("(SUM _<=_./ _)" [0,0,10] 10) syntax (latex_sum output) "_from_to_sum" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\sum_{\_ = _\<^latex>\}^{\_\<^latex>\}$\ _)" [0,0,0,10] 10) "_from_upto_sum" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\sum_{\_ = _\<^latex>\}^{<\_\<^latex>\}$\ _)" [0,0,0,10] 10) "_upt_sum" :: "idt \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\sum_{\_ < _\<^latex>\}$\ _)" [0,0,10] 10) "_upto_sum" :: "idt \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\sum_{\_ \ _\<^latex>\}$\ _)" [0,0,10] 10) syntax "_from_to_sum" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\_ = _.._./ _)" [0,0,0,10] 10) "_from_upto_sum" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\_ = _..<_./ _)" [0,0,0,10] 10) "_upt_sum" :: "idt \ 'a \ 'b \ 'b" ("(3\_<_./ _)" [0,0,10] 10) "_upto_sum" :: "idt \ 'a \ 'b \ 'b" ("(3\_\_./ _)" [0,0,10] 10) translations "\x=a..b. t" == "CONST sum (\x. t) {a..b}" "\x=a..x. t) {a..i\n. t" == "CONST sum (\i. t) {..n}" "\ii. t) {..The above introduces some pretty alternative syntaxes for summation over intervals: \begin{center} \begin{tabular}{lll} Old & New & \LaTeX\\ @{term[source]"\x\{a..b}. e"} & \<^term>\\x=a..b. e\ & @{term[mode=latex_sum]"\x=a..b. e"}\\ @{term[source]"\x\{a..\\x=a.. & @{term[mode=latex_sum]"\x=a..x\{..b}. e"} & \<^term>\\x\b. e\ & @{term[mode=latex_sum]"\x\b. e"}\\ @{term[source]"\x\{..\\x & @{term[mode=latex_sum]"\xlatex_sum\ (e.g.\ via \mode = latex_sum\ in antiquotations). It is not the default \LaTeX\ output because it only works well with italic-style formulae, not tt-style. Note that for uniformity on \<^typ>\nat\ it is better to use \<^term>\\x::nat=0.. rather than \\x: \sum\ may not provide all lemmas available for \<^term>\{m.. also in the special form for \<^term>\{...\ text\This congruence rule should be used for sums over intervals as the standard theorem @{text[source]sum.cong} does not work well with the simplifier who adds the unsimplified premise \<^term>\x\B\ to the context.\ context comm_monoid_set begin lemma zero_middle: assumes "1 \ p" "k \ p" shows "F (\j. if j < k then g j else if j = k then \<^bold>1 else h (j - Suc 0)) {..p} = F (\j. if j < k then g j else h j) {..p - Suc 0}" (is "?lhs = ?rhs") proof - have [simp]: "{..p - Suc 0} \ {j. j < k} = {.. - {j. j < k} = {k..p - Suc 0}" using assms by auto have "?lhs = F g {..* F (\j. if j = k then \<^bold>1 else h (j - Suc 0)) {k..p}" using union_disjoint [of "{.. = F g {..* F (\j. h (j - Suc 0)) {Suc k..p}" by (simp add: atLeast_Suc_atMost [of k p] assms) also have "\ = F g {..* F h {k .. p - Suc 0}" using reindex [of Suc "{k..p - Suc 0}"] assms by simp also have "\ = ?rhs" by (simp add: If_cases) finally show ?thesis . 
qed lemma atMost_Suc [simp]: "F g {..Suc n} = F g {..n} \<^bold>* g (Suc n)" by (simp add: atMost_Suc ac_simps) lemma lessThan_Suc [simp]: "F g {..* g n" by (simp add: lessThan_Suc ac_simps) lemma cl_ivl_Suc [simp]: "F g {m..Suc n} = (if Suc n < m then \<^bold>1 else F g {m..n} \<^bold>* g(Suc n))" by (auto simp: ac_simps atLeastAtMostSuc_conv) lemma op_ivl_Suc [simp]: "F g {m..1 else F g {m..* g(n))" by (auto simp: ac_simps atLeastLessThanSuc) lemma head: fixes n :: nat assumes mn: "m \ n" shows "F g {m..n} = g m \<^bold>* F g {m<..n}" (is "?lhs = ?rhs") proof - from mn have "{m..n} = {m} \ {m<..n}" by (auto intro: ivl_disj_un_singleton) hence "?lhs = F g ({m} \ {m<..n})" by (simp add: atLeast0LessThan) also have "\ = ?rhs" by simp finally show ?thesis . qed lemma last_plus: fixes n::nat shows "m \ n \ F g {m..n} = g n \<^bold>* F g {m..1 else F g {m..* g(n))" by (simp add: commute last_plus) lemma ub_add_nat: assumes "(m::nat) \ n + 1" shows "F g {m..n + p} = F g {m..n} \<^bold>* F g {n + 1..n + p}" proof- have "{m .. n+p} = {m..n} \ {n+1..n+p}" using \m \ n+1\ by auto thus ?thesis by (auto simp: ivl_disj_int union_disjoint atLeastSucAtMost_greaterThanAtMost) qed lemma nat_group: fixes k::nat shows "F (\m. F g {m * k ..< m*k + k}) {.. 0" by auto then show ?thesis by (induct n) (simp_all add: atLeastLessThan_concat add.commute atLeast0LessThan[symmetric]) qed auto lemma triangle_reindex: fixes n :: nat shows "F (\(i,j). g i j) {(i,j). i+j < n} = F (\k. F (\i. g i (k - i)) {..k}) {..(i,j). g i j) {(i,j). i+j \ n} = F (\k. F (\i. g i (k - i)) {..k}) {..n}" using triangle_reindex [of g "Suc n"] by (simp only: Nat.less_Suc_eq_le lessThan_Suc_atMost) lemma nat_diff_reindex: "F (\i. g (n - Suc i)) {..i. g(i + k)){m..i. g(i + k)){m..n::nat}" by (rule reindex_bij_witness[where i="\i. i + k" and j="\i. i - k"]) auto corollary shift_bounds_cl_Suc_ivl: "F g {Suc m..Suc n} = F (\i. g(Suc i)){m..n}" by (simp add: shift_bounds_cl_nat_ivl[where k="Suc 0", simplified]) corollary Suc_reindex_ivl: "m \ n \ F g {m..n} \<^bold>* g (Suc n) = g m \<^bold>* F (\i. g (Suc i)) {m..n}" by (simp add: assoc atLeast_Suc_atMost flip: shift_bounds_cl_Suc_ivl) corollary shift_bounds_Suc_ivl: "F g {Suc m..i. g(Suc i)){m..* F (\i. g (Suc i)) {..n}" proof (induct n) case 0 show ?case by simp next case (Suc n) note IH = this have "F g {..Suc (Suc n)} = F g {..Suc n} \<^bold>* g (Suc (Suc n))" by (rule atMost_Suc) also have "F g {..Suc n} = g 0 \<^bold>* F (\i. g (Suc i)) {..n}" by (rule IH) also have "g 0 \<^bold>* F (\i. g (Suc i)) {..n} \<^bold>* g (Suc (Suc n)) = g 0 \<^bold>* (F (\i. g (Suc i)) {..n} \<^bold>* g (Suc (Suc n)))" by (rule assoc) also have "F (\i. g (Suc i)) {..n} \<^bold>* g (Suc (Suc n)) = F (\i. g (Suc i)) {..Suc n}" by (rule atMost_Suc [symmetric]) finally show ?case . qed lemma lessThan_Suc_shift: "F g {..* F (\i. g (Suc i)) {..* F (\i. g (Suc i)) {..i. F (\j. a i j) {0..j. F (\i. a i j) {Suc j..n}) {0..i. F (\j. a i j) {..j. F (\i. a i j) {Suc j..n}) {..k. g (Suc k)) {.. = F (\k. g (Suc k)) {.. b \ F g {a..* g b" by (simp add: atLeastLessThanSuc commute) lemma nat_ivl_Suc': assumes "m \ Suc n" shows "F g {m..Suc n} = g (Suc n) \<^bold>* F g {m..n}" proof - from assms have "{m..Suc n} = insert (Suc n) {m..n}" by auto also have "F g \ = g (Suc n) \<^bold>* F g {m..n}" by simp finally show ?thesis . qed lemma in_pairs: "F g {2*m..Suc(2*n)} = F (\i. 
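text \<open>Instantiated to \<open>sum\<close>, shifting bounds looks as follows (a direct instance of \<open>shift_bounds_cl_Suc_ivl\<close>):\<close>

lemma fixes f :: "nat \<Rightarrow> nat"
  shows "(\<Sum>i = Suc 0..Suc 3. f i) = (\<Sum>i = 0..3. f (Suc i))"
  by (rule sum.shift_bounds_cl_Suc_ivl)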
g(2*i) \<^bold>* g(Suc(2*i))) {m..n}" proof (induction n) case 0 show ?case by (cases "m=0") auto next case (Suc n) then show ?case by (auto simp: assoc split: if_split_asm) qed lemma in_pairs_0: "F g {..Suc(2*n)} = F (\i. g(2*i) \<^bold>* g(Suc(2*i))) {..n}" using in_pairs [of _ 0 n] by (simp add: atLeast0AtMost) end lemma card_sum_le_nat_sum: "\ {0.. \ S" proof (cases "finite S") case True then show ?thesis proof (induction "card S" arbitrary: S) case (Suc x) then have "Max S \ x" using card_le_Suc_Max by fastforce let ?S' = "S - {Max S}" from Suc have "Max S \ S" by (auto intro: Max_in) hence cards: "card S = Suc (card ?S')" - using `finite S` by (intro card.remove; auto) + using \finite S\ by (intro card.remove; auto) hence "\ {0.. \ ?S'" using Suc by (intro Suc; auto) hence "\ {0.. \ ?S' + Max S" - using `Max S \ x` by simp + using \Max S \ x\ by simp also have "... = \ S" - using sum.remove[OF `finite S` `Max S \ S`, where g="\x. x"] + using sum.remove[OF \finite S\ \Max S \ S\, where g="\x. x"] by simp finally show ?case using cards Suc by auto qed simp qed simp lemma sum_natinterval_diff: fixes f:: "nat \ ('a::ab_group_add)" shows "sum (\k. f k - f(k + 1)) {(m::nat) .. n} = (if m \ n then f m - f(n + 1) else 0)" by (induct n, auto simp add: algebra_simps not_le le_Suc_eq) lemma sum_diff_nat_ivl: fixes f :: "nat \ 'a::ab_group_add" shows "\ m \ n; n \ p \ \ sum f {m..x. Q x \ P x \ (\xxxShifting bounds\ context comm_monoid_add begin context fixes f :: "nat \ 'a" assumes "f 0 = 0" begin lemma sum_shift_lb_Suc0_0_upt: "sum f {Suc 0..f 0 = 0\ by simp qed lemma sum_shift_lb_Suc0_0: "sum f {Suc 0..k} = sum f {0..k}" proof (cases k) case 0 with \f 0 = 0\ show ?thesis by simp next case (Suc k) moreover have "{0..Suc k} = insert 0 {Suc 0..Suc k}" by auto ultimately show ?thesis using \f 0 = 0\ by simp qed end end lemma sum_Suc_diff: fixes f :: "nat \ 'a::ab_group_add" assumes "m \ Suc n" shows "(\i = m..n. f(Suc i) - f i) = f (Suc n) - f m" using assms by (induct n) (auto simp: le_Suc_eq) lemma sum_Suc_diff': fixes f :: "nat \ 'a::ab_group_add" assumes "m \ n" shows "(\i = m..Telescoping\ lemma sum_telescope: fixes f::"nat \ 'a::ab_group_add" shows "sum (\i. f i - f (Suc i)) {.. i} = f 0 - f (Suc i)" by (induct i) simp_all lemma sum_telescope'': assumes "m \ n" shows "(\k\{Suc m..n}. f k - f (k - 1)) = f n - (f m :: 'a :: ab_group_add)" by (rule dec_induct[OF assms]) (simp_all add: algebra_simps) lemma sum_lessThan_telescope: "(\nnThe formula for geometric sums\ lemma sum_power2: "(\i=0.. 1" shows "(\i 0" by simp_all moreover have "(\iy \ 0\) ultimately show ?thesis by simp qed lemma diff_power_eq_sum: fixes y :: "'a::{comm_ring,monoid_mult}" shows "x ^ (Suc n) - y ^ (Suc n) = (x - y) * (\pppp \\COMPLEX_POLYFUN\ in HOL Light\ fixes x :: "'a::{comm_ring,monoid_mult}" shows "x^n - y^n = (x - y) * (\iiiii\n. x^i) = 1 - x^Suc n" by (simp only: one_diff_power_eq lessThan_Suc_atMost) lemma sum_power_shift: fixes x :: "'a::{comm_ring,monoid_mult}" assumes "m \ n" shows "(\i=m..n. x^i) = x^m * (\i\n-m. x^i)" proof - have "(\i=m..n. x^i) = x^m * (\i=m..n. x^(i-m))" by (simp add: sum_distrib_left power_add [symmetric]) also have "(\i=m..n. x^(i-m)) = (\i\n-m. x^i)" using \m \ n\ by (intro sum.reindex_bij_witness[where j="\i. i - m" and i="\i. i + m"]) auto finally show ?thesis . qed lemma sum_gp_multiplied: fixes x :: "'a::{comm_ring,monoid_mult}" assumes "m \ n" shows "(1 - x) * (\i=m..n. x^i) = x^m - x^Suc n" proof - have "(1 - x) * (\i=m..n. x^i) = x^m * (1 - x) * (\i\n-m. 
x^i)" by (metis mult.assoc mult.commute assms sum_power_shift) also have "... =x^m * (1 - x^Suc(n-m))" by (metis mult.assoc sum_gp_basic) also have "... = x^m - x^Suc n" using assms by (simp add: algebra_simps) (metis le_add_diff_inverse power_add) finally show ?thesis . qed lemma sum_gp: fixes x :: "'a::{comm_ring,division_ring}" shows "(\i=m..n. x^i) = (if n < m then 0 else if x = 1 then of_nat((n + 1) - m) else (x^m - x^Suc n) / (1 - x))" using sum_gp_multiplied [of m n x] apply auto by (metis eq_iff_diff_eq_0 mult.commute nonzero_divide_eq_eq) subsubsection\Geometric progressions\ lemma sum_gp0: fixes x :: "'a::{comm_ring,division_ring}" shows "(\i\n. x^i) = (if x = 1 then of_nat(n + 1) else (1 - x^Suc n) / (1 - x))" using sum_gp_basic[of x n] by (simp add: mult.commute field_split_simps) lemma sum_power_add: fixes x :: "'a::{comm_ring,monoid_mult}" shows "(\i\I. x^(m+i)) = x^m * (\i\I. x^i)" by (simp add: sum_distrib_left power_add) lemma sum_gp_offset: fixes x :: "'a::{comm_ring,division_ring}" shows "(\i=m..m+n. x^i) = (if x = 1 then of_nat n + 1 else x^m * (1 - x^Suc n) / (1 - x))" using sum_gp [of x m "m+n"] by (auto simp: power_add algebra_simps) lemma sum_gp_strict: fixes x :: "'a::{comm_ring,division_ring}" shows "(\iThe formulae for arithmetic sums\ context comm_semiring_1 begin lemma double_gauss_sum: "2 * (\i = 0..n. of_nat i) = of_nat n * (of_nat n + 1)" by (induct n) (simp_all add: sum.atLeast0_atMost_Suc algebra_simps left_add_twice) lemma double_gauss_sum_from_Suc_0: "2 * (\i = Suc 0..n. of_nat i) = of_nat n * (of_nat n + 1)" proof - have "sum of_nat {Suc 0..n} = sum of_nat (insert 0 {Suc 0..n})" by simp also have "\ = sum of_nat {0..n}" by (cases n) (simp_all add: atLeast0_atMost_Suc_eq_insert_0) finally show ?thesis by (simp add: double_gauss_sum) qed lemma double_arith_series: "2 * (\i = 0..n. a + of_nat i * d) = (of_nat n + 1) * (2 * a + of_nat n * d)" proof - have "(\i = 0..n. a + of_nat i * d) = ((\i = 0..n. a) + (\i = 0..n. of_nat i * d))" by (rule sum.distrib) also have "\ = (of_nat (Suc n) * a + d * (\i = 0..n. of_nat i))" by (simp add: sum_distrib_left algebra_simps) finally show ?thesis by (simp add: algebra_simps double_gauss_sum left_add_twice) qed end context unique_euclidean_semiring_with_nat begin lemma gauss_sum: "(\i = 0..n. of_nat i) = of_nat n * (of_nat n + 1) div 2" using double_gauss_sum [of n, symmetric] by simp lemma gauss_sum_from_Suc_0: "(\i = Suc 0..n. of_nat i) = of_nat n * (of_nat n + 1) div 2" using double_gauss_sum_from_Suc_0 [of n, symmetric] by simp lemma arith_series: "(\i = 0..n. a + of_nat i * d) = (of_nat n + 1) * (2 * a + of_nat n * d) div 2" using double_arith_series [of a d n, symmetric] by simp end lemma gauss_sum_nat: "\{0..n} = (n * Suc n) div 2" using gauss_sum [of n, where ?'a = nat] by simp lemma arith_series_nat: "(\i = 0..n. 
a + i * d) = Suc n * (2 * a + n * d) div 2" using arith_series [of a d n] by simp lemma Sum_Icc_int: "\{m..n} = (n * (n + 1) - m * (m - 1)) div 2" if "m \ n" for m n :: int using that proof (induct i \ "nat (n - m)" arbitrary: m n) case 0 then have "m = n" by arith then show ?case by (simp add: algebra_simps mult_2 [symmetric]) next case (Suc i) have 0: "i = nat((n-1) - m)" "m \ n-1" using Suc(2,3) by arith+ have "\ {m..n} = \ {m..1+(n-1)}" by simp also have "\ = \ {m..n-1} + n" using \m \ n\ by(subst atLeastAtMostPlus1_int_conv) simp_all also have "\ = ((n-1)*(n-1+1) - m*(m-1)) div 2 + n" by(simp add: Suc(1)[OF 0]) also have "\ = ((n-1)*(n-1+1) - m*(m-1) + 2*n) div 2" by simp also have "\ = (n*(n+1) - m*(m-1)) div 2" by (simp add: algebra_simps mult_2_right) finally show ?case . qed lemma Sum_Icc_nat: "\{m..n} = (n * (n + 1) - m * (m - 1)) div 2" for m n :: nat proof (cases "m \ n") case True then have *: "m * (m - 1) \ n * (n + 1)" by (meson diff_le_self order_trans le_add1 mult_le_mono) have "int (\{m..n}) = (\{int m..int n})" by (simp add: sum.atLeast_int_atMost_int_shift) also have "\ = (int n * (int n + 1) - int m * (int m - 1)) div 2" using \m \ n\ by (simp add: Sum_Icc_int) also have "\ = int ((n * (n + 1) - m * (m - 1)) div 2)" using le_square * by (simp add: algebra_simps of_nat_div of_nat_diff) finally show ?thesis by (simp only: of_nat_eq_iff) next case False then show ?thesis by (auto dest: less_imp_Suc_add simp add: not_le algebra_simps) qed lemma Sum_Ico_nat: "\{m..Division remainder\ lemma range_mod: fixes n :: nat assumes "n > 0" shows "range (\m. m mod n) = {0.. ?A \ m \ ?B" proof assume "m \ ?A" with assms show "m \ ?B" by auto next assume "m \ ?B" moreover have "m mod n \ ?A" by (rule rangeI) ultimately show "m \ ?A" by simp qed qed subsection \Products indexed over intervals\ syntax (ASCII) "_from_to_prod" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(PROD _ = _.._./ _)" [0,0,0,10] 10) "_from_upto_prod" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(PROD _ = _..<_./ _)" [0,0,0,10] 10) "_upt_prod" :: "idt \ 'a \ 'b \ 'b" ("(PROD _<_./ _)" [0,0,10] 10) "_upto_prod" :: "idt \ 'a \ 'b \ 'b" ("(PROD _<=_./ _)" [0,0,10] 10) syntax (latex_prod output) "_from_to_prod" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\prod_{\_ = _\<^latex>\}^{\_\<^latex>\}$\ _)" [0,0,0,10] 10) "_from_upto_prod" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\prod_{\_ = _\<^latex>\}^{<\_\<^latex>\}$\ _)" [0,0,0,10] 10) "_upt_prod" :: "idt \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\prod_{\_ < _\<^latex>\}$\ _)" [0,0,10] 10) "_upto_prod" :: "idt \ 'a \ 'b \ 'b" ("(3\<^latex>\$\\prod_{\_ \ _\<^latex>\}$\ _)" [0,0,10] 10) syntax "_from_to_prod" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\_ = _.._./ _)" [0,0,0,10] 10) "_from_upto_prod" :: "idt \ 'a \ 'a \ 'b \ 'b" ("(3\_ = _..<_./ _)" [0,0,0,10] 10) "_upt_prod" :: "idt \ 'a \ 'b \ 'b" ("(3\_<_./ _)" [0,0,10] 10) "_upto_prod" :: "idt \ 'a \ 'b \ 'b" ("(3\_\_./ _)" [0,0,10] 10) translations "\x=a..b. t" \ "CONST prod (\x. t) {a..b}" "\x=a.. "CONST prod (\x. t) {a..i\n. t" \ "CONST prod (\i. t) {..n}" "\i "CONST prod (\i. 
t) {..{int i..int (i+j)}" by (induct j) (auto simp add: atLeastAtMostSuc_conv atLeastAtMostPlus1_int_conv) lemma prod_int_eq: "prod int {i..j} = \{int i..int j}" proof (cases "i \ j") case True then show ?thesis by (metis le_iff_add prod_int_plus_eq) next case False then show ?thesis by auto qed subsection \Efficient folding over intervals\ function fold_atLeastAtMost_nat where [simp del]: "fold_atLeastAtMost_nat f a (b::nat) acc = (if a > b then acc else fold_atLeastAtMost_nat f (a+1) b (f a acc))" by pat_completeness auto termination by (relation "measure (\(_,a,b,_). Suc b - a)") auto lemma fold_atLeastAtMost_nat: assumes "comp_fun_commute f" shows "fold_atLeastAtMost_nat f a b acc = Finite_Set.fold f acc {a..b}" using assms proof (induction f a b acc rule: fold_atLeastAtMost_nat.induct, goal_cases) case (1 f a b acc) interpret comp_fun_commute f by fact show ?case proof (cases "a > b") case True thus ?thesis by (subst fold_atLeastAtMost_nat.simps) auto next case False with 1 show ?thesis by (subst fold_atLeastAtMost_nat.simps) (auto simp: atLeastAtMost_insertL[symmetric] fold_fun_left_comm) qed qed lemma sum_atLeastAtMost_code: "sum f {a..b} = fold_atLeastAtMost_nat (\a acc. f a + acc) a b 0" proof - have "comp_fun_commute (\a. (+) (f a))" by unfold_locales (auto simp: o_def add_ac) thus ?thesis by (simp add: sum.eq_fold fold_atLeastAtMost_nat o_def) qed lemma prod_atLeastAtMost_code: "prod f {a..b} = fold_atLeastAtMost_nat (\a acc. f a * acc) a b 1" proof - have "comp_fun_commute (\a. (*) (f a))" by unfold_locales (auto simp: o_def mult_ac) thus ?thesis by (simp add: prod.eq_fold fold_atLeastAtMost_nat o_def) qed (* TODO: Add support for folding over more kinds of intervals here *) end diff --git a/src/HOL/TPTP/ATP_Theory_Export.thy b/src/HOL/TPTP/ATP_Theory_Export.thy --- a/src/HOL/TPTP/ATP_Theory_Export.thy +++ b/src/HOL/TPTP/ATP_Theory_Export.thy @@ -1,70 +1,71 @@ (* Title: HOL/TPTP/ATP_Theory_Export.thy Author: Jasmin Blanchette, TU Muenchen *) section \ATP Theory Exporter\ theory ATP_Theory_Export imports Complex_Main begin ML_file \atp_theory_export.ML\ ML \ open ATP_Problem open ATP_Theory_Export \ ML \ val do_it = false (* switch to "true" to generate the files *) val ctxt = \<^context> val thy = \<^theory>\Complex_Main\ val infer_policy = (* Unchecked_Inferences *) No_Inferences \ ML \ if do_it then "/tmp/isa_filter" |> generate_casc_lbt_isa_files_for_theory ctxt thy - (THF (Without_FOOL, Polymorphic, THF_Without_Choice)) infer_policy "poly_native_higher" + (THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice)) infer_policy + "poly_native_higher" else () \ ML \ if do_it then "/tmp/axs_tc_native.dfg" |> generate_atp_inference_file_for_theory ctxt thy (DFG Polymorphic) infer_policy "tc_native" else () \ ML \ if do_it then "/tmp/infs_poly_guards_query_query.tptp" |> generate_atp_inference_file_for_theory ctxt thy FOF infer_policy "poly_guards??" else () \ ML \ if do_it then "/tmp/infs_poly_tags_query_query.tptp" |> generate_atp_inference_file_for_theory ctxt thy FOF infer_policy "poly_tags??" else () \ ML \ if do_it then "/tmp/casc_ltb_isa" |> generate_casc_lbt_isa_files_for_theory ctxt thy FOF infer_policy "poly_tags??" 
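(* same generator as the THF export above, but targeting untyped FOF with the
   "poly_tags??" type encoding *)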
else () \ end diff --git a/src/HOL/TPTP/atp_problem_import.ML b/src/HOL/TPTP/atp_problem_import.ML --- a/src/HOL/TPTP/atp_problem_import.ML +++ b/src/HOL/TPTP/atp_problem_import.ML @@ -1,326 +1,324 @@ (* Title: HOL/TPTP/atp_problem_import.ML Author: Jasmin Blanchette, TU Muenchen Copyright 2012 Import TPTP problems as Isabelle terms or goals. *) signature ATP_PROBLEM_IMPORT = sig val read_tptp_file : theory -> (string * term -> 'a) -> string -> 'a list * ('a list * 'a list) * local_theory val nitpick_tptp_file : theory -> int -> string -> unit val refute_tptp_file : theory -> int -> string -> unit val can_tac : Proof.context -> (Proof.context -> tactic) -> term -> bool val SOLVE_TIMEOUT : int -> string -> tactic -> tactic val atp_tac : local_theory -> int -> (string * string) list -> int -> term list -> string -> int -> tactic val smt_solver_tac : string -> local_theory -> int -> tactic val make_conj : term list * term list -> term list -> term val sledgehammer_tptp_file : theory -> int -> string -> unit val isabelle_tptp_file : theory -> int -> string -> unit val isabelle_hot_tptp_file : theory -> int -> string -> unit val translate_tptp_file : theory -> string -> string -> unit end; structure ATP_Problem_Import : ATP_PROBLEM_IMPORT = struct open ATP_Util open ATP_Problem open ATP_Proof open ATP_Problem_Generate val debug = false val overlord = false (** TPTP parsing **) fun exploded_absolute_path file_name = Path.explode file_name |> (fn path => path |> not (Path.is_absolute path) ? Path.append (Path.explode "$PWD")) fun read_tptp_file thy postproc file_name = let fun has_role role (_, role', _, _) = (role' = role) fun get_prop f (name, _, P, _) = P |> f |> close_form |> pair name |> postproc val path = exploded_absolute_path file_name val ((_, _, problem), thy) = TPTP_Interpret.interpret_file true [Path.dir path, Path.explode "$TPTP"] path [] [] thy val (conjs, defs_and_nondefs) = problem |> List.partition (has_role TPTP_Syntax.Role_Conjecture) ||> List.partition (has_role TPTP_Syntax.Role_Definition) in (map (get_prop I) conjs, apply2 (map (get_prop Logic.varify_global)) defs_and_nondefs, Named_Target.theory_init thy) end (** Nitpick **) fun aptrueprop f ((t0 as \<^Const_>\Trueprop\) $ t1) = t0 $ f t1 | aptrueprop f t = f t fun is_legitimate_tptp_def \<^Const_>\Trueprop for t\ = is_legitimate_tptp_def t | is_legitimate_tptp_def \<^Const_>\HOL.eq _ for t u\ = (is_Const t orelse is_Free t) andalso not (exists_subterm (curry (op =) t) u) | is_legitimate_tptp_def _ = false fun nitpick_tptp_file thy timeout file_name = let val (conjs, (defs, nondefs), lthy) = read_tptp_file thy snd file_name val thy = Proof_Context.theory_of lthy val (defs, pseudo_defs) = defs |> map (ATP_Util.abs_extensionalize_term lthy #> aptrueprop (hol_open_form I)) |> List.partition (is_legitimate_tptp_def o perhaps (try HOLogic.dest_Trueprop) o ATP_Util.unextensionalize_def) val nondefs = pseudo_defs @ nondefs val state = Proof.init lthy val params = [("card", "1-100"), ("box", "false"), ("max_threads", "1"), ("batch_size", "5"), ("falsify", if null conjs then "false" else "true"), ("verbose", "true"), ("debug", if debug then "true" else "false"), ("overlord", if overlord then "true" else "false"), ("show_consts", "true"), ("format", "1"), ("max_potential", "0"), ("timeout", string_of_int timeout), ("tac_timeout", string_of_int ((timeout + 49) div 50))] |> Nitpick_Commands.default_params thy val i = 1 val n = 1 val step = 0 val subst = [] in Nitpick.pick_nits_in_term state params Nitpick.TPTP i n step subst defs 
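(* the TPTP axioms, split into definitional and other formulas; the conjecture
   defaults to \<^prop>\True\ when the input problem states none *)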
nondefs (case conjs of conj :: _ => conj | [] => \<^prop>\True\); () end (** Refute **) fun refute_tptp_file thy timeout file_name = let fun print_szs_of_outcome falsify s = "% SZS status " ^ (if s = "genuine" then if falsify then "CounterSatisfiable" else "Satisfiable" else "GaveUp") |> writeln val (conjs, (defs, nondefs), lthy) = read_tptp_file thy snd file_name val params = [("maxtime", string_of_int timeout), ("maxvars", "100000")] in Refute.refute_term lthy params (defs @ nondefs) (case conjs of conj :: _ => conj | [] => \<^prop>\True\) |> print_szs_of_outcome (not (null conjs)) end (** Sledgehammer and Isabelle (combination of provers) **) fun can_tac ctxt tactic conj = can (Goal.prove_internal ctxt [] (Thm.cterm_of ctxt conj)) (fn [] => tactic ctxt) fun SOLVE_TIMEOUT seconds name tac st = let val _ = writeln ("running " ^ name ^ " for " ^ string_of_int seconds ^ " s") val result = Timeout.apply (Time.fromSeconds seconds) (fn () => SINGLE (SOLVE tac) st) () handle Timeout.TIMEOUT _ => NONE | ERROR _ => NONE in (case result of NONE => (writeln ("FAILURE: " ^ name); Seq.empty) | SOME st' => (writeln ("SUCCESS: " ^ name); Seq.single st')) end fun nitpick_finite_oracle_tac lthy timeout i th = let fun is_safe (Type (\<^type_name>\fun\, Ts)) = forall is_safe Ts | is_safe \<^typ>\prop\ = true | is_safe \<^typ>\bool\ = true | is_safe _ = false val conj = Thm.term_of (Thm.cprem_of th i) in if exists_type (not o is_safe) conj then Seq.empty else let val thy = Proof_Context.theory_of lthy val state = Proof.init lthy val params = [("box", "false"), ("max_threads", "1"), ("verbose", "true"), ("debug", if debug then "true" else "false"), ("overlord", if overlord then "true" else "false"), ("max_potential", "0"), ("timeout", string_of_int timeout)] |> Nitpick_Commands.default_params thy val i = 1 val n = 1 val step = 0 val subst = [] val (outcome, _) = Nitpick.pick_nits_in_term state params Nitpick.Normal i n step subst [] [] conj in if outcome = "none" then ALLGOALS (Skip_Proof.cheat_tac lthy) th else Seq.empty end end fun atp_tac lthy completeness override_params timeout assms prover = let val thy = Proof_Context.theory_of lthy val assm_ths0 = map (Skip_Proof.make_thm thy) assms val ((assm_name, _), lthy) = lthy |> Config.put Sledgehammer_Prover_ATP.atp_completish (if completeness > 0 then 3 else 0) |> Local_Theory.note ((\<^binding>\thms\, []), assm_ths0) fun ref_of th = (Facts.named (Thm.derivation_name th), []) val ref_of_assms = (Facts.named assm_name, []) in Sledgehammer_Tactics.sledgehammer_as_oracle_tac lthy ([("debug", if debug then "true" else "false"), ("overlord", if overlord then "true" else "false"), ("provers", prover), ("timeout", string_of_int timeout)] @ (if completeness > 0 then [("type_enc", if completeness = 1 then "mono_native" else "poly_tags")] else []) @ override_params) {add = ref_of_assms :: map ref_of [ext, @{thm someI}], del = [], only = true} [] end fun sledgehammer_tac demo lthy timeout assms i = let val frac = if demo then 16 else 12 fun slice mult completeness prover = SOLVE_TIMEOUT (mult * timeout div frac) (prover ^ (if completeness > 0 then "(" ^ string_of_int completeness ^ ")" else "")) (atp_tac lthy completeness [] (mult * timeout div frac) assms prover i) in slice 2 0 ATP_Proof.spassN ORELSE slice 2 0 ATP_Proof.vampireN ORELSE slice 2 0 ATP_Proof.eN ORELSE slice 2 0 ATP_Proof.z3_tptpN ORELSE slice 1 1 ATP_Proof.spassN ORELSE slice 1 2 ATP_Proof.eN ORELSE slice 1 1 ATP_Proof.vampireN ORELSE slice 1 2 ATP_Proof.vampireN ORELSE (if demo then slice 2 0 
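(* demo mode additionally tries the higher-order provers Satallax and LEO-II *)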
ATP_Proof.satallaxN ORELSE slice 2 0 ATP_Proof.leo2N else no_tac) end fun smt_solver_tac solver lthy = let val lthy = lthy |> Context.proof_map (SMT_Config.select_solver solver) in SMT_Solver.smt_tac lthy [] end fun auto_etc_tac lthy timeout assms i = SOLVE_TIMEOUT (timeout div 20) "nitpick" (nitpick_finite_oracle_tac lthy (timeout div 20) i) ORELSE SOLVE_TIMEOUT (timeout div 10) "simp" (asm_full_simp_tac lthy i) ORELSE SOLVE_TIMEOUT (timeout div 10) "blast" (blast_tac lthy i) ORELSE SOLVE_TIMEOUT (timeout div 5) "auto+spass" (auto_tac lthy THEN ALLGOALS (atp_tac lthy 0 [] (timeout div 5) assms ATP_Proof.spassN)) ORELSE SOLVE_TIMEOUT (timeout div 10) "fast" (fast_tac lthy i) ORELSE SOLVE_TIMEOUT (timeout div 20) "z3" (smt_solver_tac "z3" lthy i) ORELSE SOLVE_TIMEOUT (timeout div 20) "cvc4" (smt_solver_tac "cvc4" lthy i) ORELSE SOLVE_TIMEOUT (timeout div 20) "best" (best_tac lthy i) ORELSE SOLVE_TIMEOUT (timeout div 10) "force" (force_tac lthy i) ORELSE SOLVE_TIMEOUT (timeout div 10) "meson" (Meson.meson_tac lthy [] i) ORELSE SOLVE_TIMEOUT (timeout div 10) "fastforce" (fast_force_tac lthy i) -fun problem_const_prefix thy = Context.theory_name thy ^ Long_Name.separator - fun make_conj (defs, nondefs) conjs = Logic.list_implies (rev defs @ rev nondefs, case conjs of conj :: _ => conj | [] => \<^prop>\False\) fun print_szs_of_success conjs success = writeln ("% SZS status " ^ (if success then if null conjs then "Unsatisfiable" else "Theorem" else "GaveUp")) fun sledgehammer_tptp_file thy timeout file_name = let val (conjs, assms, lthy) = read_tptp_file thy snd file_name val conj = make_conj ([], []) conjs val assms = op @ assms in can_tac lthy (fn lthy => sledgehammer_tac true lthy timeout assms 1) conj |> print_szs_of_success conjs end fun generic_isabelle_tptp_file demo thy timeout file_name = let val (conjs, assms, lthy) = read_tptp_file thy snd file_name val conj = make_conj ([], []) conjs val full_conj = make_conj assms conjs val assms = op @ assms val (last_hope_atp, last_hope_completeness) = if demo then (ATP_Proof.satallaxN, 0) else (ATP_Proof.vampireN, 2) in (can_tac lthy (fn lthy => auto_etc_tac lthy (timeout div 2) assms 1) full_conj orelse can_tac lthy (fn lthy => sledgehammer_tac demo lthy (timeout div 2) assms 1) conj orelse can_tac lthy (fn lthy => SOLVE_TIMEOUT timeout (last_hope_atp ^ "(*)") (atp_tac lthy last_hope_completeness [] timeout assms last_hope_atp 1)) full_conj) |> print_szs_of_success conjs end val isabelle_tptp_file = generic_isabelle_tptp_file false val isabelle_hot_tptp_file = generic_isabelle_tptp_file true (** Translator between TPTP(-like) file formats **) fun translate_tptp_file thy format_str file_name = let val (conjs, (defs, nondefs), lthy) = read_tptp_file thy I file_name val conj = make_conj ([], []) (map snd conjs) val (format, type_enc, lam_trans) = (case format_str of "FOF" => (FOF, "mono_guards??", liftingN) - | "TF0" => (TFF (Without_FOOL, Monomorphic), "mono_native", liftingN) - | "TH0" => (THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", - keep_lamsN) + | "TF0" => (TFF (Monomorphic, Without_FOOL), "mono_native", liftingN) + | "TH0" => (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), + "mono_native_higher", keep_lamsN) | "DFG" => (DFG Monomorphic, "mono_native", liftingN) | _ => error ("Unknown format " ^ quote format_str ^ " (expected \"FOF\", \"TF0\", \"TH0\", or \"DFG\")")) val generate_info = false val uncurried_aliases = false val readable_names = true val presimp = true val facts = map 
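(* tag each fact with its Sledgehammer status: definitions as non-recursive
   definitions, everything else as general facts *)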
(apfst (rpair (Global, Non_Rec_Def))) defs @ map (apfst (rpair (Global, General))) nondefs val (atp_problem, _, _, _) = generate_atp_problem lthy generate_info format Hypothesis (type_enc_of_string Strict type_enc) Translator lam_trans uncurried_aliases readable_names presimp [] conj facts val ord = Sledgehammer_ATP_Systems.effective_term_order lthy spassN val ord_info = K [] val lines = lines_of_atp_problem format ord ord_info atp_problem in List.app Output.physical_stdout lines end end; diff --git a/src/HOL/TPTP/atp_theory_export.ML b/src/HOL/TPTP/atp_theory_export.ML --- a/src/HOL/TPTP/atp_theory_export.ML +++ b/src/HOL/TPTP/atp_theory_export.ML @@ -1,364 +1,364 @@ (* Title: HOL/TPTP/atp_theory_export.ML Author: Jasmin Blanchette, TU Muenchen Copyright 2011 Export Isabelle theories as first-order TPTP inferences. *) signature ATP_THEORY_EXPORT = sig type atp_format = ATP_Problem.atp_format datatype inference_policy = No_Inferences | Unchecked_Inferences | Checked_Inferences val generate_atp_inference_file_for_theory : Proof.context -> theory -> atp_format -> inference_policy -> string -> string -> unit val generate_casc_lbt_isa_files_for_theory : Proof.context -> theory -> atp_format -> inference_policy -> string -> string -> unit end; structure ATP_Theory_Export : ATP_THEORY_EXPORT = struct open ATP_Problem open ATP_Proof open ATP_Problem_Generate open Sledgehammer_ATP_Systems val max_dependencies = 100 val max_facts = 512 val atp_timeout = seconds 0.5 datatype inference_policy = No_Inferences | Unchecked_Inferences | Checked_Inferences val prefix = Library.prefix val fact_name_of = prefix fact_prefix o ascii_of -fun atp_of_format (THF (_, Polymorphic, _)) = leo3N - | atp_of_format (THF (_, Monomorphic, _)) = satallaxN +fun atp_of_format (THF (Polymorphic, _, _)) = leo3N + | atp_of_format (THF (Monomorphic, _, _)) = satallaxN | atp_of_format (DFG Monomorphic) = spassN - | atp_of_format (TFF (_, Polymorphic)) = alt_ergoN - | atp_of_format (TFF (_, Monomorphic)) = vampireN + | atp_of_format (TFF (Polymorphic, _)) = alt_ergoN + | atp_of_format (TFF (Monomorphic, _)) = vampireN | atp_of_format FOF = eN (* FIXME? *) | atp_of_format CNF_UEQ = waldmeisterN | atp_of_format CNF = eN (* FIXME? 
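-- E also parses CNF problems, so it doubles as the fallback prover here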
*) fun run_atp ctxt format problem = let val thy = Proof_Context.theory_of ctxt val prob_file = File.tmp_path (Path.explode "prob") val atp = atp_of_format format val {exec, arguments, proof_delims, known_failures, ...} = get_atp thy atp () val ord = effective_term_order ctxt atp val _ = problem |> lines_of_atp_problem format ord (K []) |> File.write_list prob_file val path = getenv (List.last (fst exec)) ^ "/" ^ List.last (snd exec) val command = space_implode " " (File.bash_path (Path.explode path) :: arguments ctxt false "" atp_timeout prob_file (ord, K [], K [])) val outcome = Timeout.apply atp_timeout Isabelle_System.bash_output command |> fst |> extract_tstplike_proof_and_outcome false proof_delims known_failures |> snd handle Timeout.TIMEOUT _ => SOME TimedOut val _ = tracing ("Ran ATP: " ^ (case outcome of NONE => "Success" | SOME failure => string_of_atp_failure failure)) in outcome end fun is_problem_line_reprovable ctxt format prelude axioms deps (Formula (ident', _, phi, _, _)) = is_none (run_atp ctxt format ((factsN, Formula (ident', Conjecture, phi, NONE, []) :: map_filter (Symtab.lookup axioms) deps) :: prelude)) | is_problem_line_reprovable _ _ _ _ _ _ = false fun inference_term _ [] = NONE | inference_term check_infs ss = ATerm (("inference", []), [ATerm (("checked_isabelle" |> not check_infs ? prefix "un", []), []), ATerm ((tptp_empty_list, []), []), ATerm ((tptp_empty_list, []), map (fn s => ATerm ((s, []), [])) ss)]) |> SOME fun add_inferences_to_problem_line ctxt format check_infs prelude axioms infers (line as Formula ((ident, alt), Axiom, phi, NONE, info)) = let val deps = case these (AList.lookup (op =) infers ident) of [] => [] | deps => if check_infs andalso not (is_problem_line_reprovable ctxt format prelude axioms deps line) then [] else deps in Formula ((ident, alt), Lemma, phi, inference_term check_infs deps, info) end | add_inferences_to_problem_line _ _ _ _ _ _ line = line fun add_inferences_to_problem ctxt format check_infs prelude infers problem = let fun add_if_axiom (axiom as Formula ((ident, _), Axiom, _, _, _)) = Symtab.default (ident, axiom) | add_if_axiom _ = I val add_axioms_of_problem = fold (fold add_if_axiom o snd) val axioms = Symtab.empty |> check_infs ? add_axioms_of_problem problem in map (apsnd (map (add_inferences_to_problem_line ctxt format check_infs prelude axioms infers))) problem end fun ident_of_problem_line (Class_Decl (ident, _, _)) = ident | ident_of_problem_line (Type_Decl (ident, _, _)) = ident | ident_of_problem_line (Sym_Decl (ident, _, _)) = ident | ident_of_problem_line (Class_Memb (ident, _, _, _)) = ident | ident_of_problem_line (Formula ((ident, _), _, _, _, _)) = ident fun order_facts ord = sort (ord o apply2 ident_of_problem_line) fun order_problem_facts _ [] = [] | order_problem_facts ord ((heading, lines) :: problem) = if heading = factsN then (heading, order_facts ord lines) :: problem else (heading, lines) :: order_problem_facts ord problem (* A fairly random selection of types used for monomorphizing. 
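Each type variable is matched against this list in order and instantiated with
   the first ground type that fits; terms containing a variable that matches none
   are replaced by \<^prop>\True\ in monomorphize_term below.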
*) val ground_types = [\<^typ>\nat\, HOLogic.intT, HOLogic.realT, \<^typ>\nat => bool\, \<^typ>\bool\, \<^typ>\unit\] fun ground_type_of_tvar _ [] tvar = raise TYPE ("ground_type_of_tvar", [TVar tvar], []) | ground_type_of_tvar thy (T :: Ts) tvar = if can (Sign.typ_match thy (TVar tvar, T)) Vartab.empty then T else ground_type_of_tvar thy Ts tvar fun monomorphize_term ctxt t = let val thy = Proof_Context.theory_of ctxt in t |> map_types (map_type_tvar (ground_type_of_tvar thy ground_types)) handle TYPE _ => \<^prop>\True\ end fun heading_sort_key heading = if String.isPrefix factsN heading then "_" ^ heading else heading fun problem_of_theory ctxt thy format infer_policy type_enc = let val css_table = Sledgehammer_Fact.clasimpset_rule_table_of ctxt val type_enc = type_enc |> type_enc_of_string Non_Strict |> adjust_type_enc format val mono = not (is_type_enc_polymorphic type_enc) val facts = Sledgehammer_Fact.all_facts (Proof_Context.init_global thy) true Keyword.empty_keywords [] [] css_table |> sort (Sledgehammer_MaSh.crude_thm_ord ctxt o apply2 snd) val problem = facts |> map (fn ((_, loc), th) => ((Thm.get_name_hint th, loc), th |> Thm.prop_of |> mono ? monomorphize_term ctxt)) |> generate_atp_problem ctxt true format Axiom type_enc Exporter combsN false false true [] \<^prop>\False\ |> #1 |> sort_by (heading_sort_key o fst) val prelude = fst (split_last problem) val name_tabs = Sledgehammer_Fact.build_name_tables Thm.get_name_hint facts val infers = if infer_policy = No_Inferences then [] else facts |> map (fn (_, th) => (fact_name_of (Thm.get_name_hint th), th |> Sledgehammer_Util.thms_in_proof max_dependencies (SOME name_tabs) |> these |> map fact_name_of)) val all_problem_names = problem |> maps (map ident_of_problem_line o snd) |> distinct (op =) val all_problem_name_set = Symtab.make (map (rpair ()) all_problem_names) val infers = infers |> filter (Symtab.defined all_problem_name_set o fst) |> map (apsnd (filter (Symtab.defined all_problem_name_set))) val maybe_add_edge = perhaps o try o String_Graph.add_edge_acyclic val ordered_names = String_Graph.empty |> fold (String_Graph.new_node o rpair ()) all_problem_names |> fold (fn (to, froms) => fold (fn from => maybe_add_edge (from, to)) froms) infers |> fold (fn (to, from) => maybe_add_edge (from, to)) (tl all_problem_names ~~ fst (split_last all_problem_names)) |> String_Graph.topological_order val order_tab = Symtab.empty |> fold (Symtab.insert (op =)) (ordered_names ~~ (1 upto length ordered_names)) val name_ord = int_ord o apply2 (the o Symtab.lookup order_tab) in (facts, problem |> (case format of DFG _ => I | _ => add_inferences_to_problem ctxt format (infer_policy = Checked_Inferences) prelude infers) |> order_problem_facts name_ord) end fun lines_of_problem ctxt format = lines_of_atp_problem format (effective_term_order ctxt eN (* dummy *)) (K []) fun write_lines path ss = let val _ = File.write path "" val _ = app (File.append path) ss in () end fun generate_atp_inference_file_for_theory ctxt thy format infer_policy type_enc file_name = let val (_, problem) = problem_of_theory ctxt thy format infer_policy type_enc val ss = lines_of_problem ctxt format problem in write_lines (Path.explode file_name) ss end fun ap dir = Path.append dir o Path.explode fun chop_maximal_groups eq xs = let val rev_xs = rev xs fun chop_group _ [] = [] | chop_group n (xs as x :: _) = let val n' = find_index (curry eq x) rev_xs val (ws', xs') = chop (n - n') xs in ws' :: chop_group n' xs' end in chop_group (length xs) xs end fun theory_name_of_fact 
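(* a fact's alternative name has the form "theory.lemma", so the prefix before
   the first long-name separator identifies the defining theory *)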
(Formula ((_, alt), _, _, _, _)) = (case first_field Long_Name.separator alt of NONE => alt | SOME (thy, _) => thy) | theory_name_of_fact _ = "" val problem_suffix = ".p" val suggestion_suffix = ".sugg" val include_suffix = ".ax" val file_order_name = "FilesInProcessingOrder" val problem_order_name = "ProblemsInProcessingOrder" val problem_name = "problems" val suggestion_name = "suggestions" val include_name = "incl" val prelude_base_name = "prelude" val prelude_name = prelude_base_name ^ include_suffix val encode_meta = Sledgehammer_MaSh.encode_str fun include_base_name_of_fact x = encode_meta (theory_name_of_fact x) fun include_line base_name = "include('" ^ include_name ^ "/" ^ base_name ^ include_suffix ^ "').\n" val hol_base_name = encode_meta "HOL" fun should_generate_problem thy base_name (Formula ((_, alt), _, _, _, _)) = (case try (Global_Theory.get_thm thy) alt of SOME th => (* This is a crude hack to detect theorems stated and proved by the user (as opposed to those derived by various packages). In addition, we leave out everything in "HOL" as too basic to be interesting. *) Thm.legacy_get_kind th <> "" andalso base_name <> hol_base_name | NONE => false) (* Convention: theoryname__goalname *) fun problem_name_of base_name n alt = base_name ^ "__" ^ string_of_int n ^ "_" ^ perhaps (try (unprefix (base_name ^ "_"))) alt ^ problem_suffix fun suggestion_name_of base_name n alt = base_name ^ "__" ^ string_of_int n ^ "_" ^ perhaps (try (unprefix (base_name ^ "_"))) alt ^ suggestion_suffix fun generate_casc_lbt_isa_files_for_theory ctxt thy format infer_policy type_enc dir_name = let val dir = Isabelle_System.make_directory (Path.explode dir_name) val file_order_path = ap dir file_order_name val _ = File.write file_order_path "" val problem_order_path = ap dir problem_order_name val _ = File.write problem_order_path "" val problem_dir = Isabelle_System.make_directory (ap dir problem_name) val suggestion_dir = Isabelle_System.make_directory (ap dir suggestion_name) val include_dir = Isabelle_System.make_directory (ap problem_dir include_name) val (facts, (prelude, groups)) = problem_of_theory ctxt thy format infer_policy type_enc ||> split_last ||> apsnd (snd #> chop_maximal_groups (op = o apply2 theory_name_of_fact) #> map (`(include_base_name_of_fact o hd))) val fact_tab = Symtab.make (map (fn fact as (_, th) => (Thm.get_name_hint th, fact)) facts) fun write_prelude () = let val ss = lines_of_problem ctxt format prelude in File.append file_order_path (prelude_base_name ^ "\n"); write_lines (ap include_dir prelude_name) ss end fun write_include_file (base_name, fact_lines) = let val name = base_name ^ include_suffix val ss = lines_of_problem ctxt format [(factsN, fact_lines)] in File.append file_order_path (base_name ^ "\n"); write_lines (ap include_dir name) ss end fun select_facts_for_fact facts fact = let val (hyp_ts, conj_t) = Logic.strip_horn (Thm.prop_of (snd fact)) val mepo = Sledgehammer_MePo.mepo_suggested_facts ctxt (Sledgehammer_Commands.default_params thy []) max_facts NONE hyp_ts conj_t facts in map (suffix "\n" o fact_name_of o Thm.get_name_hint o snd) mepo end fun write_problem_files _ _ _ _ [] = () | write_problem_files _ seen_facts includes [] groups = write_problem_files 1 seen_facts includes includes groups | write_problem_files n seen_facts includes _ ((base_name, []) :: groups) = write_problem_files n seen_facts (includes @ [include_line base_name]) [] groups | write_problem_files n seen_facts includes seen_ss ((base_name, fact_line :: fact_lines) :: groups) = let 
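(* turn the current fact into a conjecture: the problem file contains all
   previously seen lines plus this conjecture, and the MePo fact suggestions
   for it go into a companion file *)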
val (alt, pname, sname, conj) = (case fact_line of Formula ((ident, alt), _, phi, _, _) => (alt, problem_name_of base_name n (encode_meta alt), suggestion_name_of base_name n (encode_meta alt), Formula ((ident, alt), Conjecture, phi, NONE, []))) val fact = the (Symtab.lookup fact_tab alt) val fact_s = tptp_string_of_line format fact_line in (if should_generate_problem thy base_name fact_line then let val conj_s = tptp_string_of_line format conj in File.append problem_order_path (pname ^ "\n"); write_lines (ap problem_dir pname) (seen_ss @ [conj_s]); write_lines (ap suggestion_dir sname) (select_facts_for_fact facts fact) end else (); write_problem_files (n + 1) (fact :: seen_facts) includes (seen_ss @ [fact_s]) ((base_name, fact_lines) :: groups)) end val _ = write_prelude () val _ = app write_include_file groups val _ = write_problem_files 1 [] [include_line prelude_base_name] [] groups in () end end; diff --git a/src/HOL/Tools/ATP/atp_problem.ML b/src/HOL/Tools/ATP/atp_problem.ML --- a/src/HOL/Tools/ATP/atp_problem.ML +++ b/src/HOL/Tools/ATP/atp_problem.ML @@ -1,1046 +1,1055 @@ (* Title: HOL/Tools/ATP/atp_problem.ML Author: Jia Meng, Cambridge University Computer Laboratory and NICTA Author: Jasmin Blanchette, TU Muenchen Author: Martin Desharnais, MPI-INF Saarbruecken Abstract representation of ATP problems and TPTP syntax. *) signature ATP_PROBLEM = sig datatype ('a, 'b) atp_term = ATerm of ('a * 'b list) * ('a, 'b) atp_term list | AAbs of (('a * 'b) * ('a, 'b) atp_term) * ('a, 'b) atp_term list datatype atp_quantifier = AForall | AExists datatype atp_connective = ANot | AAnd | AOr | AImplies | AIff datatype ('a, 'b, 'c, 'd) atp_formula = ATyQuant of atp_quantifier * ('b * 'd list) list * ('a, 'b, 'c, 'd) atp_formula | AQuant of atp_quantifier * ('a * 'b option) list * ('a, 'b, 'c, 'd) atp_formula | AConn of atp_connective * ('a, 'b, 'c, 'd) atp_formula list | AAtom of 'c datatype 'a atp_type = AType of ('a * 'a list) * 'a atp_type list | AFun of 'a atp_type * 'a atp_type | APi of 'a list * 'a atp_type type term_order = {is_lpo : bool, gen_weights : bool, gen_prec : bool, gen_simp : bool} - datatype fool = Without_FOOL | With_FOOL + type syntax = {with_ite : bool, with_let : bool} + datatype fool = Without_FOOL | With_FOOL of syntax datatype polymorphism = Monomorphic | Polymorphic datatype thf_flavor = THF_Lambda_Free | THF_Without_Choice | THF_With_Choice datatype atp_format = CNF | CNF_UEQ | FOF | - TFF of fool * polymorphism | - THF of fool * polymorphism * thf_flavor | + TFF of polymorphism * fool | + THF of polymorphism * syntax * thf_flavor | DFG of polymorphism datatype atp_formula_role = Axiom | Definition | Lemma | Hypothesis | Conjecture | Negated_Conjecture | Plain | Type_Role | Unknown datatype 'a atp_problem_line = Class_Decl of string * 'a * 'a list | Type_Decl of string * 'a * int | Sym_Decl of string * 'a * 'a atp_type | Datatype_Decl of string * ('a * 'a list) list * 'a atp_type * ('a, 'a atp_type) atp_term list * bool | Class_Memb of string * ('a * 'a list) list * 'a atp_type * 'a | Formula of (string * string) * atp_formula_role * ('a, 'a atp_type, ('a, 'a atp_type) atp_term, 'a) atp_formula * (string, string atp_type) atp_term option * (string, string atp_type) atp_term list type 'a atp_problem = (string * 'a atp_problem_line list) list val tptp_cnf : string val tptp_fof : string val tptp_tff : string val tptp_thf : string val tptp_has_type : string val tptp_type_of_types : string val tptp_bool_type : string val tptp_individual_type : string val tptp_fun_type : 
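(* the arrow ">" of TPTP function types *)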
string val tptp_product_type : string val tptp_forall : string val tptp_ho_forall : string val tptp_pi_binder : string val tptp_exists : string val tptp_ho_exists : string val tptp_choice : string val tptp_ho_choice : string val tptp_hilbert_choice : string val tptp_hilbert_the : string val tptp_not : string val tptp_and : string val tptp_not_and : string val tptp_or : string val tptp_not_or : string val tptp_implies : string val tptp_if : string val tptp_iff : string val tptp_not_iff : string val tptp_ite : string val tptp_let : string val tptp_app : string val tptp_not_infix : string val tptp_equal : string val tptp_not_equal : string val tptp_old_equal : string val tptp_false : string val tptp_true : string val tptp_lambda : string val tptp_empty_list : string type tptp_builtin_desc = {arity : int, is_predicate : bool} val tptp_builtins : tptp_builtin_desc Symtab.table val dfg_individual_type : string val isabelle_info_prefix : string val isabelle_info : bool -> string -> int -> (string, 'a) atp_term list val extract_isabelle_status : (string, 'a) atp_term list -> string option val extract_isabelle_rank : (string, 'a) atp_term list -> int val inductionN : string val introN : string val inductiveN : string val elimN : string val simpN : string val non_rec_defN : string val rec_defN : string val rankN : string val minimum_rank : int val default_rank : int val default_term_order_weight : int val is_tptp_equal : string -> bool val is_built_in_tptp_symbol : string -> bool val is_tptp_variable : string -> bool val is_tptp_user_symbol : string -> bool val bool_atype : (string * string) atp_type val individual_atype : (string * string) atp_type val mk_anot : ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'c, 'd) atp_formula val mk_aconn : atp_connective -> ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'c, 'd) atp_formula val mk_app : (string, 'a) atp_term -> (string, 'a) atp_term -> (string, 'a) atp_term val mk_apps : (string, 'a) atp_term -> (string, 'a) atp_term list -> (string, 'a) atp_term val mk_simple_aterm: 'a -> ('a, 'b) atp_term val aconn_fold : bool option -> (bool option -> 'a -> 'b -> 'b) -> atp_connective * 'a list -> 'b -> 'b val aconn_map : bool option -> (bool option -> 'a -> ('b, 'c, 'd, 'e) atp_formula) -> atp_connective * 'a list -> ('b, 'c, 'd, 'e) atp_formula val formula_fold : bool option -> (bool option -> 'c -> 'e -> 'e) -> ('a, 'b, 'c, 'd) atp_formula -> 'e -> 'e val formula_map : ('c -> 'e) -> ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'e, 'd) atp_formula val strip_atype : 'a atp_type -> 'a list * ('a atp_type list * 'a atp_type) val is_format_higher_order : atp_format -> bool val tptp_string_of_format : atp_format -> string val tptp_string_of_role : atp_formula_role -> string val tptp_string_of_line : atp_format -> string atp_problem_line -> string val lines_of_atp_problem : atp_format -> term_order -> (unit -> (string * int) list) -> string atp_problem -> string list val ensure_cnf_problem : (string * string) atp_problem -> (string * string) atp_problem val filter_cnf_ueq_problem : (string * string) atp_problem -> (string * string) atp_problem val declared_in_atp_problem : 'a atp_problem -> ('a list * 'a list) * 'a list val nice_atp_problem : bool -> atp_format -> ('a * (string * string) atp_problem_line list) list -> ('a * string atp_problem_line list) list * (string Symtab.table * string Symtab.table) option end; structure ATP_Problem : ATP_PROBLEM = struct open ATP_Util val parens = enclose "(" ")" (** ATP problem **) datatype ('a, 'b) atp_term 
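(* terms are applications of a head carrying optional type arguments or,
   for THF, lambda abstractions applied to argument lists *)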
= ATerm of ('a * 'b list) * ('a, 'b) atp_term list | AAbs of (('a * 'b) * ('a, 'b) atp_term) * ('a, 'b) atp_term list datatype atp_quantifier = AForall | AExists datatype atp_connective = ANot | AAnd | AOr | AImplies | AIff datatype ('a, 'b, 'c, 'd) atp_formula = ATyQuant of atp_quantifier * ('b * 'd list) list * ('a, 'b, 'c, 'd) atp_formula | AQuant of atp_quantifier * ('a * 'b option) list * ('a, 'b, 'c, 'd) atp_formula | AConn of atp_connective * ('a, 'b, 'c, 'd) atp_formula list | AAtom of 'c datatype 'a atp_type = AType of ('a * 'a list) * 'a atp_type list | AFun of 'a atp_type * 'a atp_type | APi of 'a list * 'a atp_type type term_order = {is_lpo : bool, gen_weights : bool, gen_prec : bool, gen_simp : bool} -datatype fool = Without_FOOL | With_FOOL + +type syntax = {with_ite : bool, with_let : bool} +datatype fool = Without_FOOL | With_FOOL of syntax datatype polymorphism = Monomorphic | Polymorphic datatype thf_flavor = THF_Lambda_Free | THF_Without_Choice | THF_With_Choice datatype atp_format = CNF | CNF_UEQ | FOF | - TFF of fool * polymorphism | - THF of fool * polymorphism * thf_flavor | + TFF of polymorphism * fool | + THF of polymorphism * syntax * thf_flavor | DFG of polymorphism datatype atp_formula_role = Axiom | Definition | Lemma | Hypothesis | Conjecture | Negated_Conjecture | Plain | Type_Role | Unknown datatype 'a atp_problem_line = Class_Decl of string * 'a * 'a list | Type_Decl of string * 'a * int | Sym_Decl of string * 'a * 'a atp_type | Datatype_Decl of string * ('a * 'a list) list * 'a atp_type * ('a, 'a atp_type) atp_term list * bool | Class_Memb of string * ('a * 'a list) list * 'a atp_type * 'a | Formula of (string * string) * atp_formula_role * ('a, 'a atp_type, ('a, 'a atp_type) atp_term, 'a) atp_formula * (string, string atp_type) atp_term option * (string, string atp_type) atp_term list type 'a atp_problem = (string * 'a atp_problem_line list) list (* official TPTP syntax *) val tptp_cnf = "cnf" val tptp_fof = "fof" val tptp_tff = "tff" val tptp_thf = "thf" val tptp_has_type = ":" val tptp_type_of_types = "$tType" val tptp_bool_type = "$o" val tptp_individual_type = "$i" val tptp_fun_type = ">" val tptp_product_type = "*" val tptp_forall = "!" val tptp_ho_forall = "!!" val tptp_pi_binder = "!>" val tptp_exists = "?" val tptp_ho_exists = "??" val tptp_choice = "@+" val tptp_ho_choice = "@@+" val tptp_not = "~" val tptp_and = "&" val tptp_not_and = "~&" val tptp_or = "|" val tptp_not_or = "~|" val tptp_implies = "=>" val tptp_if = "<=" val tptp_iff = "<=>" val tptp_not_iff = "<~>" val tptp_ite = "$ite" val tptp_let = "$let" val tptp_app = "@" val tptp_hilbert_choice = "@+" val tptp_hilbert_the = "@-" val tptp_not_infix = "!" val tptp_equal = "=" val tptp_not_equal = "!=" val tptp_old_equal = "equal" val tptp_false = "$false" val tptp_true = "$true" val tptp_lambda = "^" val tptp_empty_list = "[]" (* A predicate has return type $o (i.e. 
bool) *) type tptp_builtin_desc = {arity : int, is_predicate : bool} val tptp_builtins = let fun make_builtin arity is_predicate name = (name, {arity = arity, is_predicate = is_predicate}) in map (make_builtin 0 true) [tptp_false, tptp_true] @ - map (make_builtin 1 true) [tptp_not] @ + map (make_builtin 1 true) [tptp_not, tptp_ho_forall, tptp_ho_exists] @ map (make_builtin 2 true) [tptp_and, tptp_or, tptp_implies, tptp_if, tptp_iff, tptp_not_iff, tptp_equal, tptp_old_equal] @ map (make_builtin 2 false) [tptp_let] @ map (make_builtin 3 false) [tptp_ite] |> Symtab.make end val dfg_individual_type = "iii" (* cannot clash *) val isabelle_info_prefix = "isabelle_" val inductionN = "induction" val introN = "intro" val inductiveN = "inductive" val elimN = "elim" val simpN = "simp" val non_rec_defN = "non_rec_def" val rec_defN = "rec_def" val rankN = "rank" val minimum_rank = 0 val default_rank = 1000 val default_term_order_weight = 1 (* Currently, only SPASS 3.8ds and (to a lesser extent) Metis can process Isabelle metainformation. *) fun isabelle_info generate_info status rank = if generate_info then [] |> rank <> default_rank ? cons (ATerm ((isabelle_info_prefix ^ rankN, []), [ATerm ((string_of_int rank, []), [])])) |> status <> "" ? cons (ATerm ((isabelle_info_prefix ^ status, []), [])) else [] fun extract_isabelle_status (ATerm ((s, []), []) :: _) = try (unprefix isabelle_info_prefix) s | extract_isabelle_status _ = NONE fun extract_isabelle_rank (tms as _ :: _) = (case List.last tms of ATerm ((_, []), [ATerm ((rank, []), [])]) => the (Int.fromString rank) | _ => default_rank) | extract_isabelle_rank _ = default_rank fun is_tptp_equal s = (s = tptp_equal orelse s = tptp_old_equal) fun is_built_in_tptp_symbol s = s = tptp_old_equal orelse not (Char.isAlpha (String.sub (s, 0))) fun is_tptp_variable s = s <> "" andalso Char.isUpper (String.sub (s, 0)) val is_tptp_user_symbol = not o (is_tptp_variable orf is_built_in_tptp_symbol) val bool_atype = AType ((`I tptp_bool_type, []), []) val individual_atype = AType ((`I tptp_individual_type, []), []) fun raw_polarities_of_conn ANot = (SOME false, NONE) | raw_polarities_of_conn AAnd = (SOME true, SOME true) | raw_polarities_of_conn AOr = (SOME true, SOME true) | raw_polarities_of_conn AImplies = (SOME false, SOME true) | raw_polarities_of_conn AIff = (NONE, NONE) fun polarities_of_conn NONE = K (NONE, NONE) | polarities_of_conn (SOME pos) = raw_polarities_of_conn #> not pos ? 
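(* in a negative context the polarities of both arguments are flipped *)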
apply2 (Option.map not) fun mk_anot (AConn (ANot, [phi])) = phi | mk_anot phi = AConn (ANot, [phi]) fun mk_aconn c phi1 phi2 = AConn (c, [phi1, phi2]) fun mk_app t u = ATerm ((tptp_app, []), [t, u]) fun mk_apps f xs = fold (fn x => fn f => mk_app f x) xs f fun mk_simple_aterm p = ATerm ((p, []), []) fun aconn_fold pos f (ANot, [phi]) = f (Option.map not pos) phi | aconn_fold pos f (AImplies, [phi1, phi2]) = f (Option.map not pos) phi1 #> f pos phi2 | aconn_fold pos f (AAnd, phis) = fold (f pos) phis | aconn_fold pos f (AOr, phis) = fold (f pos) phis | aconn_fold _ f (_, phis) = fold (f NONE) phis fun aconn_map pos f (ANot, [phi]) = AConn (ANot, [f (Option.map not pos) phi]) | aconn_map pos f (AImplies, [phi1, phi2]) = AConn (AImplies, [f (Option.map not pos) phi1, f pos phi2]) | aconn_map pos f (AAnd, phis) = AConn (AAnd, map (f pos) phis) | aconn_map pos f (AOr, phis) = AConn (AOr, map (f pos) phis) | aconn_map _ f (c, phis) = AConn (c, map (f NONE) phis) fun formula_fold pos f = let fun fld pos (AQuant (_, _, phi)) = fld pos phi | fld pos (ATyQuant (_, _, phi)) = fld pos phi | fld pos (AConn conn) = aconn_fold pos fld conn | fld pos (AAtom tm) = f pos tm in fld pos end fun formula_map f (AQuant (q, xs, phi)) = AQuant (q, xs, formula_map f phi) | formula_map f (ATyQuant (q, xs, phi)) = ATyQuant (q, xs, formula_map f phi) | formula_map f (AConn (c, phis)) = AConn (c, map (formula_map f) phis) | formula_map f (AAtom tm) = AAtom (f tm) fun strip_api (APi (tys, ty)) = strip_api ty |>> append tys | strip_api ty = ([], ty) fun strip_afun (AFun (ty1, ty2)) = strip_afun ty2 |>> cons ty1 | strip_afun ty = ([], ty) fun strip_atype ty = ty |> strip_api ||> strip_afun fun is_function_atype ty = snd (snd (strip_atype ty)) <> AType ((tptp_bool_type, []), []) fun is_predicate_atype ty = not (is_function_atype ty) fun is_nontrivial_predicate_atype (AType _) = false | is_nontrivial_predicate_atype ty = is_predicate_atype ty fun is_format_higher_order (THF _) = true | is_format_higher_order _ = false + +fun is_format_higher_order_with_choice (THF (_, _, THF_With_Choice)) = true + | is_format_higher_order_with_choice _ = false + fun is_format_typed (TFF _) = true | is_format_typed (THF _) = true | is_format_typed (DFG _) = true | is_format_typed _ = false -fun is_format_with_fool (THF (With_FOOL, _, _)) = true - | is_format_with_fool (TFF (With_FOOL, _)) = true +fun is_format_with_fool (THF _) = true + | is_format_with_fool (TFF (_, With_FOOL _)) = true | is_format_with_fool _ = false fun tptp_string_of_role Axiom = "axiom" | tptp_string_of_role Definition = "definition" | tptp_string_of_role Lemma = "lemma" | tptp_string_of_role Hypothesis = "hypothesis" | tptp_string_of_role Conjecture = "conjecture" | tptp_string_of_role Negated_Conjecture = "negated_conjecture" | tptp_string_of_role Plain = "plain" | tptp_string_of_role Type_Role = "type" | tptp_string_of_role Unknown = "unknown" fun tptp_string_of_app _ func [] = func | tptp_string_of_app format func args = if is_format_higher_order format then "(" ^ space_implode (" " ^ tptp_app ^ " ") (func :: args) ^ ")" else func ^ "(" ^ commas args ^ ")" fun uncurry_type (APi (tys, ty)) = APi (tys, uncurry_type ty) | uncurry_type (ty as AFun (ty1 as AType _, ty2)) = (case uncurry_type ty2 of AFun (ty' as AType ((s, _), tys), ty) => AFun (AType ((tptp_product_type, []), ty1 :: (if s = tptp_product_type then tys else [ty'])), ty) | _ => ty) | uncurry_type (ty as AType _) = ty | uncurry_type _ = raise Fail "unexpected higher-order type in first-order format" val 
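(* appends " : $tType" to a name, as needed for TPTP type declarations *)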
suffix_type_of_types = suffix (" " ^ tptp_has_type ^ " " ^ tptp_type_of_types) fun str_of_type format ty = let val dfg = (case format of DFG _ => true | _ => false) fun str _ (AType ((s, _), [])) = if dfg andalso s = tptp_individual_type then dfg_individual_type else s | str rhs (AType ((s, _), tys)) = if s = tptp_fun_type then let val [ty1, ty2] = tys in str rhs (AFun (ty1, ty2)) end else let val ss = tys |> map (str false) in if s = tptp_product_type then ss |> space_implode (if dfg then ", " else " " ^ tptp_product_type ^ " ") |> (not dfg andalso length ss > 1) ? parens else tptp_string_of_app format s ss end | str rhs (AFun (ty1, ty2)) = (str false ty1 |> dfg ? parens) ^ " " ^ (if dfg then "" else tptp_fun_type ^ " ") ^ str true ty2 |> not rhs ? parens | str _ (APi (ss, ty)) = if dfg then "[" ^ commas ss ^ "], " ^ str true ty else tptp_pi_binder ^ "[" ^ commas (map suffix_type_of_types ss) ^ "]: " ^ str false ty in str true ty end fun string_of_type (format as THF _) ty = str_of_type format ty | string_of_type format ty = str_of_type format (uncurry_type ty) fun tptp_string_of_quantifier AForall = tptp_forall | tptp_string_of_quantifier AExists = tptp_exists fun tptp_string_of_connective ANot = tptp_not | tptp_string_of_connective AAnd = tptp_and | tptp_string_of_connective AOr = tptp_or | tptp_string_of_connective AImplies = tptp_implies | tptp_string_of_connective AIff = tptp_iff fun string_of_bound_var format (s, ty) = s ^ (if is_format_typed format then " " ^ tptp_has_type ^ " " ^ (ty |> the_default (AType ((tptp_individual_type, []), [])) |> string_of_type format) else "") fun tptp_string_of_term _ (ATerm ((s, []), [])) = s |> Symtab.defined tptp_builtins s ? parens | tptp_string_of_term format (ATerm ((s, tys), ts)) = let val of_type = string_of_type format val of_term = tptp_string_of_term format fun app0 f types args = tptp_string_of_app format (f |> Symtab.defined tptp_builtins f ? parens) (map (of_type #> is_format_higher_order format ? parens) types @ map of_term args) fun app () = app0 s tys ts in if s = tptp_empty_list then (* used for lists in the optional "source" field of a derivation *) "[" ^ commas (map of_term ts) ^ "]" else if is_tptp_equal s then space_implode (" " ^ tptp_equal ^ " ") (map of_term ts) |> (is_format_higher_order format orelse is_format_with_fool format) ? parens else if s = tptp_ho_forall orelse s = tptp_ho_exists then (case ts of [AAbs (((s', ty), tm), [])] => (* Use syntactic sugar "!" and "?" instead of "!!" and "??" whenever possible, to work around LEO-II, Leo-III, and Satallax parser limitation. 
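For example, the term "!! @ (^[X : $i]: p(X))" prints as "![X : $i]: p(X)"
       (modulo parentheses).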
*) tptp_string_of_formula format (AQuant (if s = tptp_ho_forall then AForall else AExists, [(s', SOME ty)], AAtom tm)) | _ => app ()) else if s = tptp_let then (case ts of t1 :: AAbs (((s', ty), tm), []) :: ts => let val declaration = s' ^ " : " ^ of_type ty val definition = s' ^ " := " ^ of_term t1 val usage = of_term tm in if ts = [] orelse is_format_higher_order format then app0 (s ^ "(" ^ declaration ^ ", " ^ definition ^ ", " ^ usage ^ ")") [] ts else - error (tptp_let ^ " is special syntax and more than three arguments is only \ + error (tptp_let ^ " is special syntax and more than two arguments is only \ \supported in higher order") end | _ => error (tptp_let ^ " is special syntax and must have at least two arguments")) else if s = tptp_ite then (case ts of t1 :: t2 :: t3 :: ts => if ts = [] orelse is_format_higher_order format then app0 (s ^ "(" ^ of_term t1 ^ ", " ^ of_term t2 ^ ", " ^ of_term t3 ^ ")") [] ts else - error (tptp_ite ^" is special syntax and more than three arguments is only supported \ + error (tptp_ite ^ " is special syntax and more than three arguments is only supported \ \in higher order") - | _ => error "$ite is special syntax and must have at least three arguments") + | _ => error (tptp_ite ^ " is special syntax and must have at least three arguments")) else if s = tptp_choice then (case ts of - [AAbs (((s', ty), tm), args)] => + (AAbs (((s', ty), tm), args) :: ts) => (* There is code in "ATP_Problem_Generate" to ensure that "Eps" is always applied to an abstraction. *) - tptp_string_of_app format - (tptp_choice ^ "[" ^ s' ^ " : " ^ of_type ty ^ - "]: " ^ of_term tm ^ "" - |> parens) (map of_term args) - | _ => app ()) + if ts = [] orelse is_format_higher_order_with_choice format then + let val declaration = s' ^ " : " ^ of_type ty in + app0 ("(" ^ tptp_choice ^ "[" ^ declaration ^ "]: " ^ of_term tm ^ ")") [] (args @ ts) + end + else + error (tptp_choice ^ " is only supported in higher order") + | _ => error (tptp_choice ^ " must be followed by a lambda abstraction")) else (case (Symtab.lookup tptp_builtins s, ts) of (SOME {arity = 1, is_predicate = true}, [t]) => (* generate e.g. "~ t" instead of "~ @ t". *) s ^ " " ^ (of_term t |> parens) |> parens | (SOME {arity = 2, is_predicate = true}, [t1, t2]) => (* generate e.g. "t1 & t2" instead of "& @ t1 @ t2". *) (of_term t1 |> parens) ^ " " ^ s ^ " " ^ (of_term t2 |> parens) |> parens | _ => app ()) end | tptp_string_of_term (format as THF _) (AAbs (((s, ty), tm), args)) = tptp_string_of_app format ("(^[" ^ s ^ " : " ^ string_of_type format ty ^ "]: " ^ tptp_string_of_term format tm ^ ")") (map (tptp_string_of_term format) args) | tptp_string_of_term _ _ = raise Fail "unexpected term in first-order format" and tptp_string_of_formula format (ATyQuant (q, xs, phi)) = tptp_string_of_quantifier q ^ "[" ^ commas (map (suffix_type_of_types o string_of_type format o fst) xs) ^ "]: " ^ tptp_string_of_formula format phi |> parens | tptp_string_of_formula format (AQuant (q, xs, phi)) = tptp_string_of_quantifier q ^ "[" ^ commas (map (string_of_bound_var format) xs) ^ "]: " ^ tptp_string_of_formula format phi |> parens | tptp_string_of_formula format (AConn (ANot, [AAtom (ATerm (("=" (* tptp_equal *), []), ts))])) = space_implode (" " ^ tptp_not_infix ^ tptp_equal ^ " ") (map (tptp_string_of_term format) ts) |> is_format_higher_order format ? parens | tptp_string_of_formula format (AConn (c, [phi])) = tptp_string_of_connective c ^ " " ^ (tptp_string_of_formula format phi |> is_format_higher_order format ? 
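(* the operand of a unary connective needs its own parentheses in higher-order output *)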
parens) |> parens | tptp_string_of_formula format (AConn (c, phis)) = space_implode (" " ^ tptp_string_of_connective c ^ " ") (map (tptp_string_of_formula format) phis) |> parens | tptp_string_of_formula format (AAtom tm) = tptp_string_of_term format tm fun tptp_string_of_format CNF = tptp_cnf | tptp_string_of_format CNF_UEQ = tptp_cnf | tptp_string_of_format FOF = tptp_fof | tptp_string_of_format (TFF _) = tptp_tff | tptp_string_of_format (THF _) = tptp_thf | tptp_string_of_format (DFG _) = raise Fail "non-TPTP format" val atype_of_types = AType ((tptp_type_of_types, []), []) fun nary_type_decl_type n = funpow n (curry AFun atype_of_types) atype_of_types fun maybe_alt "" = "" | maybe_alt s = " % " ^ s fun tptp_string_of_line format (Type_Decl (ident, ty, ary)) = tptp_string_of_line format (Sym_Decl (ident, ty, nary_type_decl_type ary)) | tptp_string_of_line format (Sym_Decl (ident, sym, ty)) = tptp_string_of_format format ^ "(" ^ ident ^ ", type,\n " ^ sym ^ " : " ^ string_of_type format ty ^ ").\n" | tptp_string_of_line format (Formula ((ident, alt), kind, phi, source, info)) = tptp_string_of_format format ^ "(" ^ ident ^ ", " ^ tptp_string_of_role kind ^ "," ^ "\n (" ^ tptp_string_of_formula format phi ^ ")" ^ (case source of SOME tm => ", " ^ tptp_string_of_term format tm | NONE => if null info then "" else ", []") ^ (case info of [] => "" | tms => ", [" ^ commas (map (tptp_string_of_term format) tms) ^ "]") ^ ")." ^ maybe_alt alt ^ "\n" fun tptp_lines format = maps (fn (_, []) => [] | (heading, lines) => "\n% " ^ heading ^ " (" ^ string_of_int (length lines) ^ ")\n" :: map (tptp_string_of_line format) lines) fun arity_of_type (APi (tys, ty)) = arity_of_type ty |>> Integer.add (length tys) | arity_of_type (AFun (_, ty)) = arity_of_type ty ||> Integer.add 1 | arity_of_type _ = (0, 0) fun string_of_arity (0, n) = string_of_int n | string_of_arity (m, n) = string_of_int m ^ "+" ^ string_of_int n val dfg_class_inter = space_implode " & " fun dfg_string_of_term (ATerm ((s, tys), tms)) = s ^ (if null tys then "" else "<" ^ commas (map (string_of_type (DFG Polymorphic)) tys) ^ ">") ^ (if null tms then "" else "(" ^ commas (map dfg_string_of_term tms) ^ ")") | dfg_string_of_term _ = raise Fail "unexpected atom in first-order format" fun dfg_string_of_formula poly gen_simp info = let val str_of_typ = string_of_type (DFG poly) fun str_of_bound_typ (ty, []) = str_of_typ ty | str_of_bound_typ (ty, cls) = str_of_typ ty ^ " : " ^ dfg_class_inter cls fun suffix_tag top_level s = if top_level then (case extract_isabelle_status info of SOME s' => if s' = non_rec_defN then s ^ ":lt" else if (s' = simpN orelse s' = rec_defN) andalso gen_simp then s ^ ":lr" else s | NONE => s) else s fun str_of_atom top_level (ATerm ((s, tys), tms)) = let val s' = if is_tptp_equal s then "equal" |> suffix_tag top_level else if s = tptp_true then "true" else if s = tptp_false then "false" else s in dfg_string_of_term (ATerm ((s', tys), tms)) end | str_of_atom _ _ = raise Fail "unexpected atom in first-order format" fun str_of_quant AForall = "forall" | str_of_quant AExists = "exists" fun str_of_conn _ ANot = "not" | str_of_conn _ AAnd = "and" | str_of_conn _ AOr = "or" | str_of_conn _ AImplies = "implies" | str_of_conn top_level AIff = "equiv" |> suffix_tag top_level fun str_of_formula top_level (ATyQuant (q, xs, phi)) = str_of_quant q ^ "_sorts([" ^ commas (map str_of_bound_typ xs) ^ "], " ^ str_of_formula top_level phi ^ ")" | str_of_formula top_level (AQuant (q, xs, phi)) = str_of_quant q ^ "([" ^ commas (map 
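(* For illustration: a universally quantified implication is rendered in DFG syntax as
   "forall([X], implies(p(X), q(X)))", and at top level an equation stemming from a simp or
   recursive definition rule is tagged for orientation, e.g. "equal:lr(lhs, rhs)" when
   "gen_simp" is set ("p", "q", "lhs", "rhs" are placeholders). *)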
(string_of_bound_var (DFG poly)) xs) ^ "], " ^ str_of_formula top_level phi ^ ")" | str_of_formula top_level (AConn (c, phis)) = str_of_conn top_level c ^ "(" ^ commas (map (str_of_formula false) phis) ^ ")" | str_of_formula top_level (AAtom tm) = str_of_atom top_level tm in str_of_formula true end fun maybe_enclose bef aft "" = "% " ^ bef ^ aft | maybe_enclose bef aft s = bef ^ s ^ aft fun dfg_lines poly {is_lpo, gen_weights, gen_prec, gen_simp} ord_info problem = let val typ = string_of_type (DFG poly) val term = dfg_string_of_term fun spair (s, s') = "(" ^ s ^ ", " ^ s' ^ ")" fun tm_ary sym ty = spair (sym, string_of_arity (arity_of_type ty)) fun ty_ary 0 ty = ty | ty_ary n ty = "(" ^ ty ^ ", " ^ string_of_int n ^ ")" fun fun_typ sym ty = "function(" ^ sym ^ ", " ^ typ ty ^ ")." fun pred_typ sym ty = let val (ty_vars, (tys, _)) = strip_atype ty |>> (fn [] => [] | xs => ["[" ^ commas xs ^ "]"]) in "predicate(" ^ commas (sym :: ty_vars @ map typ tys) ^ ")." end fun bound_tvar (ty, []) = ty | bound_tvar (ty, cls) = ty ^ " : " ^ dfg_class_inter cls fun binder_typ xs ty = (if null xs then "" else "[" ^ commas (map bound_tvar xs) ^ "], ") ^ typ ty fun sort_decl xs ty cl = "sort(" ^ binder_typ xs ty ^ ", " ^ cl ^ ")." fun datatype_decl xs ty tms exhaust = "datatype(" ^ commas (binder_typ xs ty :: map term tms @ (if exhaust then [] else ["..."])) ^ ")." fun subclass_of sub super = "subclass(" ^ sub ^ ", " ^ super ^ ")." fun formula pred (Formula ((ident, alt), kind, phi, _, info)) = if pred kind then let val rank = extract_isabelle_rank info in "formula(" ^ dfg_string_of_formula poly gen_simp info phi ^ ", " ^ ident ^ (if rank = default_rank then "" else ", " ^ string_of_int rank) ^ ")." ^ maybe_alt alt |> SOME end else NONE | formula _ _ = NONE fun filt f = problem |> map (map_filter f o snd) |> filter_out null val func_aries = filt (fn Sym_Decl (_, sym, ty) => if is_function_atype ty then SOME (tm_ary sym ty) else NONE | _ => NONE) |> flat |> commas |> maybe_enclose "functions [" "]." val pred_aries = filt (fn Sym_Decl (_, sym, ty) => if is_predicate_atype ty then SOME (tm_ary sym ty) else NONE | _ => NONE) |> flat |> commas |> maybe_enclose "predicates [" "]." val sorts = filt (try (fn Type_Decl (_, ty, ary) => ty_ary ary ty)) @ [[ty_ary 0 dfg_individual_type]] |> flat |> commas |> maybe_enclose "sorts [" "]." val classes = filt (try (fn Class_Decl (_, cl, _) => cl)) |> flat |> commas |> maybe_enclose "classes [" "]." val ord_info = if gen_weights orelse gen_prec then ord_info () else [] val do_term_order_weights = (if gen_weights then ord_info else []) |> map (spair o apsnd string_of_int) |> commas |> maybe_enclose "weights [" "]." 
val syms = [func_aries, pred_aries, do_term_order_weights, sorts, classes] val func_decls = filt (fn Sym_Decl (_, sym, ty) => if is_function_atype ty then SOME (fun_typ sym ty) else NONE | _ => NONE) |> flat val pred_decls = filt (fn Sym_Decl (_, sym, ty) => if is_nontrivial_predicate_atype ty then SOME (pred_typ sym ty) else NONE | _ => NONE) |> flat val datatype_decls = filt (try (fn Datatype_Decl (_, xs, ty, tms, exhaust) => datatype_decl xs ty tms exhaust)) |> flat val sort_decls = filt (try (fn Class_Memb (_, xs, ty, cl) => sort_decl xs ty cl)) |> flat val subclass_decls = filt (try (fn Class_Decl (_, sub, supers) => map (subclass_of sub) supers)) |> flat |> flat val decls = func_decls @ pred_decls @ datatype_decls @ sort_decls @ subclass_decls val axioms = filt (formula (curry (op <>) Conjecture)) |> separate [""] |> flat val conjs = filt (formula (curry (op =) Conjecture)) |> separate [""] |> flat val settings = (if is_lpo then ["set_flag(Ordering, 1)."] else []) @ (if gen_prec then [ord_info |> map fst |> rev |> commas |> maybe_enclose "set_precedence(" ")."] else []) fun list_of _ [] = [] | list_of heading ss = "list_of_" ^ heading ^ ".\n" :: map (suffix "\n") ss @ ["end_of_list.\n\n"] in "\nbegin_problem(isabelle).\n\n" :: list_of "descriptions" ["name({**}).", "author({**}).", "status(unknown).", "description({**})."] @ list_of "symbols" syms @ list_of "declarations" decls @ list_of "formulae(axioms)" axioms @ list_of "formulae(conjectures)" conjs @ list_of "settings(SPASS)" settings @ ["end_problem.\n"] end fun lines_of_atp_problem format ord ord_info problem = "% This file was generated by Isabelle (most likely Sledgehammer)\n\ \% " ^ timestamp () ^ "\n" :: (case format of DFG poly => dfg_lines poly ord ord_info | _ => tptp_lines format) problem (** CNF (Metis) and CNF UEQ (Waldmeister) **) fun is_line_negated (Formula (_, _, AConn (ANot, _), _, _)) = true | is_line_negated _ = false fun is_line_cnf_ueq (Formula (_, _, AAtom (ATerm (((s, _), _), _)), _, _)) = is_tptp_equal s | is_line_cnf_ueq _ = false fun open_conjecture_term (ATerm (((s, s'), tys), tms)) = ATerm ((if is_tptp_variable s then (s |> Name.desymbolize (SOME false), s') else (s, s'), tys), tms |> map open_conjecture_term) | open_conjecture_term _ = raise Fail "unexpected higher-order term" fun open_formula conj = let (* We are conveniently assuming that all bound variable names are distinct, which should be the case for the formulas we generate. *) fun opn (pos as SOME true) (AQuant (AForall, _, phi)) = opn pos phi | opn (pos as SOME false) (AQuant (AExists, _, phi)) = opn pos phi | opn pos (AConn (ANot, [phi])) = mk_anot (opn (Option.map not pos) phi) | opn pos (AConn (c, [phi1, phi2])) = let val (pos1, pos2) = polarities_of_conn pos c in AConn (c, [opn pos1 phi1, opn pos2 phi2]) end | opn _ (AAtom t) = AAtom (t |> conj ? open_conjecture_term) | opn _ phi = phi in opn (SOME (not conj)) end fun open_formula_line (Formula (ident, kind, phi, source, info)) = Formula (ident, kind, open_formula (kind = Conjecture) phi, source, info) | open_formula_line line = line fun negate_conjecture_line (Formula (ident, Conjecture, phi, source, info)) = Formula (ident, Hypothesis, mk_anot phi, source, info) | negate_conjecture_line line = line exception CLAUSIFY of unit (* This "clausification" only expands syntactic sugar, such as "phi => psi" to "~ phi | psi" and "phi <=> psi" to "~ phi | psi" and "~ psi | phi". We don't attempt to distribute conjunctions over disjunctions. 
*) fun clausify_formula pos (phi as AAtom _) = [phi |> not pos ? mk_anot] | clausify_formula pos (AConn (ANot, [phi])) = clausify_formula (not pos) phi | clausify_formula true (AConn (AOr, [phi1, phi2])) = (phi1, phi2) |> apply2 (clausify_formula true) |> uncurry (map_product (mk_aconn AOr)) | clausify_formula false (AConn (AAnd, [phi1, phi2])) = (phi1, phi2) |> apply2 (clausify_formula false) |> uncurry (map_product (mk_aconn AOr)) | clausify_formula true (AConn (AImplies, [phi1, phi2])) = clausify_formula true (AConn (AOr, [mk_anot phi1, phi2])) | clausify_formula true (AConn (AIff, phis)) = clausify_formula true (AConn (AImplies, phis)) @ clausify_formula true (AConn (AImplies, rev phis)) | clausify_formula _ _ = raise CLAUSIFY () fun clausify_formula_line (Formula ((ident, alt), kind, phi, source, info)) = let val (n, phis) = phi |> try (clausify_formula true) |> these |> `length in map2 (fn phi => fn j => Formula ((ident ^ replicate_string (j - 1) "x", alt), kind, phi, source, info)) phis (1 upto n) end | clausify_formula_line _ = [] fun ensure_cnf_line line = line |> open_formula_line |> negate_conjecture_line |> clausify_formula_line fun ensure_cnf_problem problem = problem |> map (apsnd (maps ensure_cnf_line)) fun filter_cnf_ueq_problem problem = problem |> map (apsnd (map open_formula_line #> filter is_line_cnf_ueq #> map negate_conjecture_line)) |> (fn problem => let val lines = problem |> maps snd val conjs = lines |> filter is_line_negated in if length conjs = 1 andalso conjs <> lines then problem else [] end) (** Symbol declarations **) fun add_declared_in_line (Class_Decl (_, cl, _)) = apfst (apfst (cons cl)) | add_declared_in_line (Type_Decl (_, ty, _)) = apfst (apsnd (cons ty)) | add_declared_in_line (Sym_Decl (_, sym, _)) = apsnd (cons sym) | add_declared_in_line _ = I fun declared_in_atp_problem problem = fold (fold add_declared_in_line o snd) problem (([], []), []) (** Nice names **) val no_qualifiers = let fun skip [] = [] | skip (#"." :: cs) = skip cs | skip (c :: cs) = if Char.isAlphaNum c then skip cs else c :: keep cs and keep [] = [] | keep (#"." :: cs) = skip cs | keep (c :: cs) = c :: keep cs in String.explode #> rev #> keep #> rev #> String.implode end (* Long names can slow down the ATPs. *) val max_readable_name_size = 20 (* "equal" is reserved by some ATPs. "op" is also reserved, to avoid the unreadable "op_1", "op_2", etc., in the problem files. "eq" is reserved to ensure that "HOL.eq" is correctly mapped to equality (not clear whether this is still necessary). 
*) val reserved_nice_names = [tptp_old_equal, "op", "eq"] (* hack to get the same hashing across Mirabelle runs (see "mirabelle.pl") *) fun cleanup_mirabelle_name s = let val mirabelle_infix = "_Mirabelle_" val random_suffix_len = 10 val (s1, s2) = Substring.position mirabelle_infix (Substring.full s) in if Substring.isEmpty s2 then s else Substring.string s1 ^ Substring.string (Substring.triml (size mirabelle_infix + random_suffix_len) s2) end fun readable_name protect full_name s = (if s = full_name then s else s |> no_qualifiers |> unquote_tvar |> Name.desymbolize (SOME (Char.isUpper (String.sub (full_name, 0)))) |> (fn s => if size s > max_readable_name_size then String.substring (s, 0, max_readable_name_size div 2 - 4) ^ string_of_int (hash_string (cleanup_mirabelle_name full_name)) ^ String.extract (s, size s - max_readable_name_size div 2 + 4, NONE) else s) |> (fn s => if member (op =) reserved_nice_names s then full_name else s)) |> protect fun nice_name _ (full_name, _) NONE = (full_name, NONE) | nice_name protect (full_name, desired_name) (SOME the_pool) = if is_built_in_tptp_symbol full_name then (full_name, SOME the_pool) else (case Symtab.lookup (fst the_pool) full_name of SOME nice_name => (nice_name, SOME the_pool) | NONE => let val nice_prefix = readable_name protect full_name desired_name fun add j = let val nice_name = nice_prefix ^ (if j = 1 then "" else string_of_int j) in (case Symtab.lookup (snd the_pool) nice_name of SOME full_name' => if full_name = full_name' then (nice_name, the_pool) else add (j + 1) | NONE => (nice_name, (Symtab.update_new (full_name, nice_name) (fst the_pool), Symtab.update_new (nice_name, full_name) (snd the_pool)))) end in add 1 |> apsnd SOME end) fun avoid_clash_with_dfg_keywords s = let val n = String.size s in if n < 2 orelse (n = 2 andalso String.sub (s, 0) = String.sub (s, 1)) orelse String.isSubstring "_" s then s else if is_tptp_variable s then s ^ "_" else String.substring (s, 0, n - 1) ^ String.str (Char.toUpper (String.sub (s, n - 1))) end fun nice_atp_problem readable_names format problem = let val empty_pool = if readable_names then SOME (Symtab.empty, Symtab.empty) else NONE val avoid_clash = (case format of DFG _ => avoid_clash_with_dfg_keywords | _ => I) val nice_name = nice_name avoid_clash fun nice_bound_tvars xs = fold_map (nice_name o fst) xs ##>> fold_map (fold_map nice_name o snd) xs #>> op ~~ fun nice_type (AType ((name, clss), tys)) = nice_name name ##>> fold_map nice_name clss ##>> fold_map nice_type tys #>> AType | nice_type (AFun (ty1, ty2)) = nice_type ty1 ##>> nice_type ty2 #>> AFun | nice_type (APi (names, ty)) = fold_map nice_name names ##>> nice_type ty #>> APi fun nice_term (ATerm ((name, tys), ts)) = nice_name name ##>> fold_map nice_type tys ##>> fold_map nice_term ts #>> ATerm | nice_term (AAbs (((name, ty), tm), args)) = nice_name name ##>> nice_type ty ##>> nice_term tm ##>> fold_map nice_term args #>> AAbs fun nice_formula (ATyQuant (q, xs, phi)) = fold_map (nice_type o fst) xs ##>> fold_map (fold_map nice_name o snd) xs ##>> nice_formula phi #>> (fn ((tys, cls), phi) => ATyQuant (q, tys ~~ cls, phi)) | nice_formula (AQuant (q, xs, phi)) = fold_map (nice_name o fst) xs ##>> fold_map (fn (_, NONE) => pair NONE | (_, SOME ty) => nice_type ty #>> SOME) xs ##>> nice_formula phi #>> (fn ((ss, ts), phi) => AQuant (q, ss ~~ ts, phi)) | nice_formula (AConn (c, phis)) = fold_map nice_formula phis #>> curry AConn c | nice_formula (AAtom tm) = nice_term tm #>> AAtom fun nice_line (Class_Decl (ident, cl, cls)) = 
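(* The pool threaded through "nice_name" above is a pair of mutually inverse tables, keeping the
   renaming injective: a second full name that shortens to an already-taken candidate receives a
   numeric suffix, e.g. "my_fact", then "my_fact2" (names illustrative). *)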
nice_name cl ##>> fold_map nice_name cls #>> (fn (cl, cls) => Class_Decl (ident, cl, cls)) | nice_line (Type_Decl (ident, ty, ary)) = nice_name ty #>> (fn ty => Type_Decl (ident, ty, ary)) | nice_line (Sym_Decl (ident, sym, ty)) = nice_name sym ##>> nice_type ty #>> (fn (sym, ty) => Sym_Decl (ident, sym, ty)) | nice_line (Datatype_Decl (ident, xs, ty, tms, exhaust)) = nice_bound_tvars xs ##>> nice_type ty ##>> fold_map nice_term tms #>> (fn ((xs, ty), tms) => Datatype_Decl (ident, xs, ty, tms, exhaust)) | nice_line (Class_Memb (ident, xs, ty, cl)) = nice_bound_tvars xs ##>> nice_type ty ##>> nice_name cl #>> (fn ((xs, ty), cl) => Class_Memb (ident, xs, ty, cl)) | nice_line (Formula (ident, kind, phi, source, info)) = nice_formula phi #>> (fn phi => Formula (ident, kind, phi, source, info)) fun nice_problem problem = fold_map (fn (heading, lines) => fold_map nice_line lines #>> pair heading) problem in nice_problem problem empty_pool end end; diff --git a/src/HOL/Tools/ATP/atp_problem_generate.ML b/src/HOL/Tools/ATP/atp_problem_generate.ML --- a/src/HOL/Tools/ATP/atp_problem_generate.ML +++ b/src/HOL/Tools/ATP/atp_problem_generate.ML @@ -1,3030 +1,3059 @@ (* Title: HOL/Tools/ATP/atp_problem_generate.ML Author: Fabian Immler, TU Muenchen Author: Makarius Author: Jasmin Blanchette, TU Muenchen Author: Martin Desharnais, UniBw Muenchen Translation of HOL to FOL for Metis and Sledgehammer. *) signature ATP_PROBLEM_GENERATE = sig type ('a, 'b) atp_term = ('a, 'b) ATP_Problem.atp_term type atp_connective = ATP_Problem.atp_connective type ('a, 'b, 'c, 'd) atp_formula = ('a, 'b, 'c, 'd) ATP_Problem.atp_formula type atp_format = ATP_Problem.atp_format type atp_formula_role = ATP_Problem.atp_formula_role type 'a atp_problem = 'a ATP_Problem.atp_problem datatype mode = Metis | Sledgehammer | Sledgehammer_Completish of int | Exporter | Translator datatype scope = Global | Local | Assum | Chained datatype status = General | Induction | Intro | Inductive | Elim | Simp | Non_Rec_Def | Rec_Def type stature = scope * status datatype strictness = Strict | Non_Strict datatype uniformity = Uniform | Non_Uniform datatype ctr_optim = With_Ctr_Optim | Without_Ctr_Optim datatype type_level = All_Types | Undercover_Types | Nonmono_Types of strictness * uniformity | Const_Types of ctr_optim | No_Types type type_enc val no_lamsN : string val opaque_liftingN : string val liftingN : string val opaque_combsN : string val combsN : string val combs_and_liftingN : string val combs_or_liftingN : string val keep_lamsN : string val schematic_var_prefix : string val fixed_var_prefix : string val tvar_prefix : string val tfree_prefix : string val const_prefix : string val type_const_prefix : string val class_prefix : string val lam_lifted_prefix : string val lam_lifted_mono_prefix : string val lam_lifted_poly_prefix : string val skolem_const_prefix : string val old_skolem_const_prefix : string val new_skolem_const_prefix : string val combinator_prefix : string val class_decl_prefix : string val type_decl_prefix : string val sym_decl_prefix : string val datatype_decl_prefix : string val class_memb_prefix : string val guards_sym_formula_prefix : string val tags_sym_formula_prefix : string val fact_prefix : string val conjecture_prefix : string val helper_prefix : string val subclass_prefix : string val tcon_clause_prefix : string val tfree_clause_prefix : string val lam_fact_prefix : string val typed_helper_suffix : string val untyped_helper_suffix : string val predicator_name : string val app_op_name : string val type_guard_name 
: string val type_tag_name : string val native_type_prefix : string val prefixed_predicator_name : string val prefixed_app_op_name : string val prefixed_type_tag_name : string val ascii_of : string -> string val unascii_of : string -> string val unprefix_and_unascii : string -> string -> string option val proxy_table : (string * (string * (thm * (string * string)))) list val proxify_const : string -> (string * string) option val invert_const : string -> string val unproxify_const : string -> string val new_skolem_var_name_of_const : string -> string val atp_logical_consts : string list val atp_irrelevant_consts : string list val atp_widely_irrelevant_consts : string list val is_irrelevant_const : string -> bool val is_widely_irrelevant_const : string -> bool val atp_schematic_consts_of : term -> typ list Symtab.table val is_type_enc_higher_order : type_enc -> bool val is_type_enc_polymorphic : type_enc -> bool val level_of_type_enc : type_enc -> type_level val is_type_enc_sound : type_enc -> bool val type_enc_of_string : strictness -> string -> type_enc val adjust_type_enc : atp_format -> type_enc -> type_enc val is_first_order_lambda_free : term -> bool val do_cheaply_conceal_lambdas : typ list -> term -> term val mk_aconns : atp_connective -> ('a, 'b, 'c, 'd) atp_formula list -> ('a, 'b, 'c, 'd) atp_formula val unmangled_type : string -> (string, 'a) ATP_Problem.atp_term val unmangled_const : string -> string * (string, 'b) atp_term list val unmangled_const_name : string -> string list val helper_table : bool -> ((string * bool) * (status * thm) list) list val trans_lams_of_string : Proof.context -> type_enc -> string -> term list -> term list * term list val string_of_status : status -> string val factsN : string val generate_atp_problem : Proof.context -> bool -> atp_format -> atp_formula_role -> type_enc -> mode -> string -> bool -> bool -> bool -> term list -> term -> ((string * stature) * term) list -> string atp_problem * string Symtab.table * (string * term) list * int Symtab.table val atp_problem_selection_weights : string atp_problem -> (string * real) list val atp_problem_term_order_info : string atp_problem -> (string * int) list end; structure ATP_Problem_Generate : ATP_PROBLEM_GENERATE = struct open ATP_Util open ATP_Problem datatype mode = Metis | Sledgehammer | Sledgehammer_Completish of int | Exporter | Translator datatype scope = Global | Local | Assum | Chained datatype status = General | Induction | Intro | Inductive | Elim | Simp | Non_Rec_Def | Rec_Def type stature = scope * status datatype order = First_Order | Higher_Order of thf_flavor datatype phantom_policy = Without_Phantom_Type_Vars | With_Phantom_Type_Vars datatype polymorphism = Type_Class_Polymorphic | Raw_Polymorphic of phantom_policy | Raw_Monomorphic | Mangled_Monomorphic datatype strictness = Strict | Non_Strict datatype uniformity = Uniform | Non_Uniform datatype ctr_optim = With_Ctr_Optim | Without_Ctr_Optim datatype type_level = All_Types | Undercover_Types | Nonmono_Types of strictness * uniformity | Const_Types of ctr_optim | No_Types -type syntax = {with_ite: bool} - datatype type_enc = - Native of order * fool * syntax * polymorphism * type_level | + Native of order * fool * polymorphism * type_level | Guards of polymorphism * type_level | Tags of polymorphism * type_level (* not clear whether ATPs prefer to have their negative variables tagged *) val tag_neg_vars = false fun is_type_enc_native (Native _) = true | is_type_enc_native _ = false -fun is_type_enc_full_higher_order (Native 
(Higher_Order THF_Lambda_Free, _, _, _, _)) = false - | is_type_enc_full_higher_order (Native (Higher_Order _, _, _, _, _)) = true +fun is_type_enc_full_higher_order (Native (Higher_Order THF_Lambda_Free, _, _, _)) = false + | is_type_enc_full_higher_order (Native (Higher_Order _, _, _, _)) = true | is_type_enc_full_higher_order _ = false -fun is_type_enc_fool (Native (_, With_FOOL, _, _, _)) = true +fun is_type_enc_fool (Native (_, With_FOOL _, _, _)) = true | is_type_enc_fool _ = false -fun is_type_enc_higher_order (Native (Higher_Order _, _, _, _, _)) = true +fun is_type_enc_higher_order (Native (Higher_Order _, _, _, _)) = true | is_type_enc_higher_order _ = false -fun has_type_enc_ite (Native (_, _, {with_ite, ...}, _, _)) = with_ite + +fun has_type_enc_choice (Native (Higher_Order THF_With_Choice, _, _, _)) = true + | has_type_enc_choice _ = false +fun has_type_enc_ite (Native (_, With_FOOL {with_ite, ...}, _, _)) = with_ite | has_type_enc_ite _ = false +fun has_type_enc_let (Native (_, With_FOOL {with_let, ...}, _, _)) = with_let + | has_type_enc_let _ = false -fun polymorphism_of_type_enc (Native (_, _, _, poly, _)) = poly +fun polymorphism_of_type_enc (Native (_, _, poly, _)) = poly | polymorphism_of_type_enc (Guards (poly, _)) = poly | polymorphism_of_type_enc (Tags (poly, _)) = poly fun is_type_enc_polymorphic type_enc = (case polymorphism_of_type_enc type_enc of Raw_Polymorphic _ => true | Type_Class_Polymorphic => true | _ => false) fun is_type_enc_mangling type_enc = polymorphism_of_type_enc type_enc = Mangled_Monomorphic -fun level_of_type_enc (Native (_, _, _, _, level)) = level +fun level_of_type_enc (Native (_, _, _, level)) = level | level_of_type_enc (Guards (_, level)) = level | level_of_type_enc (Tags (_, level)) = level fun is_type_level_uniform (Nonmono_Types (_, Non_Uniform)) = false | is_type_level_uniform Undercover_Types = false | is_type_level_uniform _ = true fun is_type_level_sound (Const_Types _) = false | is_type_level_sound No_Types = false | is_type_level_sound _ = true val is_type_enc_sound = is_type_level_sound o level_of_type_enc fun is_type_level_monotonicity_based (Nonmono_Types _) = true | is_type_level_monotonicity_based _ = false val no_lamsN = "no_lams" (* used internally; undocumented *) val opaque_liftingN = "opaque_lifting" val liftingN = "lifting" val opaque_combsN = "opaque_combs" val combsN = "combs" val combs_and_liftingN = "combs_and_lifting" val combs_or_liftingN = "combs_or_lifting" val keep_lamsN = "keep_lams" (* The capitalization of the TPTP output respects the capitalization of the prefix. *) val bound_var_prefix = "B_" val let_bound_var_prefix = "l_" val all_bound_var_prefix = "A_" val exist_bound_var_prefix = "E_" val schematic_var_prefix = "V_" val fixed_var_prefix = "v_" val tvar_prefix = "T_" val tfree_prefix = "tf_" val const_prefix = "c_" val type_const_prefix = "t_" val native_type_prefix = "n_" val class_prefix = "cl_" (* Freshness almost guaranteed!
*) val atp_prefix = "ATP" ^ Long_Name.separator val atp_weak_prefix = "ATP:" val atp_weak_suffix = ":ATP" val lam_lifted_prefix = atp_weak_prefix ^ "Lam" val lam_lifted_mono_prefix = lam_lifted_prefix ^ "m" val lam_lifted_poly_prefix = lam_lifted_prefix ^ "p" val skolem_const_prefix = atp_prefix ^ "Sko" val old_skolem_const_prefix = skolem_const_prefix ^ "o" val new_skolem_const_prefix = skolem_const_prefix ^ "n" val combinator_prefix = "COMB" val class_decl_prefix = "cl_" val type_decl_prefix = "ty_" val sym_decl_prefix = "sy_" val datatype_decl_prefix = "dt_" val class_memb_prefix = "cm_" val guards_sym_formula_prefix = "gsy_" val tags_sym_formula_prefix = "tsy_" val uncurried_alias_eq_prefix = "unc_" val fact_prefix = "fact_" val conjecture_prefix = "conj_" val helper_prefix = "help_" val subclass_prefix = "subcl_" val tcon_clause_prefix = "tcon_" val tfree_clause_prefix = "tfree_" val lam_fact_prefix = "ATP.lambda_" val typed_helper_suffix = "_T" val untyped_helper_suffix = "_U" val predicator_name = "pp" val app_op_name = "aa" val type_guard_name = "gg" val type_tag_name = "tt" val prefixed_predicator_name = const_prefix ^ predicator_name val prefixed_app_op_name = const_prefix ^ app_op_name val prefixed_type_tag_name = const_prefix ^ type_tag_name (*Escaping of special characters. Alphanumeric characters are left unchanged. The character _ goes to __. Characters in the range ASCII space to / go to _A to _P, respectively. Other characters go to _nnn where nnn is the decimal ASCII code. *) val upper_a_minus_space = Char.ord #"A" - Char.ord #" " fun ascii_of_char c = if Char.isAlphaNum c then String.str c else if c = #"_" then "__" else if #" " <= c andalso c <= #"/" then "_" ^ String.str (Char.chr (Char.ord c + upper_a_minus_space)) else (* fixed width, in case more digits follow *) "_" ^ stringN_of_int 3 (Char.ord c) val ascii_of = String.translate ascii_of_char (** Remove ASCII armoring from names in proof files **) (* We don't raise error exceptions because this code can run inside a worker thread. Also, the errors are impossible. *) val unascii_of = let fun un rcs [] = String.implode (rev rcs) | un rcs [#"_"] = un (#"_" :: rcs) [] (* ERROR *) (* Three types of _ escapes: __, _A to _P, _nnn *) | un rcs (#"_" :: #"_" :: cs) = un (#"_" :: rcs) cs | un rcs (#"_" :: c :: cs) = if #"A" <= c andalso c<= #"P" then (* translation of #" " to #"/" *) un (Char.chr (Char.ord c - upper_a_minus_space) :: rcs) cs else let val digits = List.take (c :: cs, 3) handle General.Subscript => [] in (case Int.fromString (String.implode digits) of SOME n => un (Char.chr n :: rcs) (List.drop (cs, 2)) | NONE => un (c :: #"_" :: rcs) cs (* ERROR *)) end | un rcs (c :: cs) = un (c :: rcs) cs in un [] o String.explode end (* If string s has the prefix s1, return the result of deleting it, un-ASCII'd. 
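For example, since "ascii_of" renders "." as "_O", unprefix_and_unascii const_prefix "c_Nat_OSuc" yields SOME "Nat.Suc"; if the prefix test fails, the result is NONE.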
*) fun unprefix_and_unascii s1 s = if String.isPrefix s1 s then SOME (unascii_of (String.extract (s, size s1, NONE))) else NONE val proxy_table = [("c_False", (\<^const_name>\False\, (@{thm fFalse_def}, ("fFalse", \<^const_name>\fFalse\)))), ("c_True", (\<^const_name>\True\, (@{thm fTrue_def}, ("fTrue", \<^const_name>\fTrue\)))), ("c_Not", (\<^const_name>\Not\, (@{thm fNot_def}, ("fNot", \<^const_name>\fNot\)))), ("c_conj", (\<^const_name>\conj\, (@{thm fconj_def}, ("fconj", \<^const_name>\fconj\)))), ("c_disj", (\<^const_name>\disj\, (@{thm fdisj_def}, ("fdisj", \<^const_name>\fdisj\)))), ("c_implies", (\<^const_name>\implies\, (@{thm fimplies_def}, ("fimplies", \<^const_name>\fimplies\)))), ("equal", (\<^const_name>\HOL.eq\, (@{thm fequal_def}, ("fequal", \<^const_name>\fequal\)))), ("c_All", (\<^const_name>\All\, (@{thm fAll_def}, ("fAll", \<^const_name>\fAll\)))), - ("c_Ex", (\<^const_name>\Ex\, (@{thm fEx_def}, ("fEx", \<^const_name>\fEx\))))] + ("c_Ex", (\<^const_name>\Ex\, (@{thm fEx_def}, ("fEx", \<^const_name>\fEx\)))), + ("c_Choice", (\<^const_name>\Hilbert_Choice.Eps\, (@{thm fChoice_def}, ("fChoice", \<^const_name>\fChoice\))))] val proxify_const = AList.lookup (op =) proxy_table #> Option.map (snd o snd) (* Readable names for the more common symbolic functions. Do not mess with the table unless you know what you are doing. *) val const_trans_table = [(\<^const_name>\False\, "False"), (\<^const_name>\True\, "True"), (\<^const_name>\Not\, "Not"), (\<^const_name>\conj\, "conj"), (\<^const_name>\disj\, "disj"), (\<^const_name>\implies\, "implies"), (\<^const_name>\HOL.eq\, "equal"), (\<^const_name>\All\, "All"), (\<^const_name>\Ex\, "Ex"), (\<^const_name>\If\, "If"), (\<^const_name>\Set.member\, "member"), (\<^const_name>\HOL.Let\, "Let"), + (\<^const_name>\Hilbert_Choice.Eps\, "Choice"), (\<^const_name>\Meson.COMBI\, combinator_prefix ^ "I"), (\<^const_name>\Meson.COMBK\, combinator_prefix ^ "K"), (\<^const_name>\Meson.COMBB\, combinator_prefix ^ "B"), (\<^const_name>\Meson.COMBC\, combinator_prefix ^ "C"), (\<^const_name>\Meson.COMBS\, combinator_prefix ^ "S")] |> Symtab.make |> fold (Symtab.update o swap o snd o snd o snd) proxy_table (* Invert the table of translations between Isabelle and ATPs. 
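E.g., invert_const "implies" recovers "HOL.implies", and the proxy entries folded in above additionally let "unproxify_const" map the "fconj" proxy constant back to "HOL.conj".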
*) val const_trans_table_inv = const_trans_table |> Symtab.dest |> map swap |> Symtab.make val const_trans_table_unprox = Symtab.empty |> fold (fn (_, (isa, (_, (_, atp)))) => Symtab.update (atp, isa)) proxy_table val invert_const = perhaps (Symtab.lookup const_trans_table_inv) val unproxify_const = perhaps (Symtab.lookup const_trans_table_unprox) fun lookup_const c = (case Symtab.lookup const_trans_table c of SOME c' => c' | NONE => ascii_of c) fun ascii_of_indexname (v, 0) = ascii_of v | ascii_of_indexname (v, i) = ascii_of v ^ "_" ^ string_of_int i fun make_bound_var x = bound_var_prefix ^ ascii_of x fun make_all_bound_var x = all_bound_var_prefix ^ ascii_of x fun make_exist_bound_var x = exist_bound_var_prefix ^ ascii_of x fun make_schematic_var v = schematic_var_prefix ^ ascii_of_indexname v fun make_fixed_var x = fixed_var_prefix ^ ascii_of x fun make_tvar (s, i) = tvar_prefix ^ ascii_of_indexname (unquote_tvar s, i) fun make_tfree s = tfree_prefix ^ ascii_of (unquote_tvar s) fun tvar_name ((x as (s, _)), _) = (make_tvar x, s) (* "HOL.eq" and choice are mapped to the ATP's equivalents *) -local - val choice_const = (fst o dest_Const o HOLogic.choice_const) dummyT - fun default c = const_prefix ^ lookup_const c -in - fun make_fixed_const _ \<^const_name>\HOL.eq\ = tptp_old_equal - | make_fixed_const (SOME (Native (Higher_Order THF_With_Choice, _, _, _, _))) c = - if c = choice_const then tptp_choice else default c - | make_fixed_const _ c = default c -end +fun make_fixed_const _ \<^const_name>\HOL.eq\ = tptp_old_equal + | make_fixed_const _ c = const_prefix ^ lookup_const c fun make_fixed_type_const c = type_const_prefix ^ lookup_const c fun make_class clas = class_prefix ^ ascii_of clas fun new_skolem_var_name_of_const s = let val ss = Long_Name.explode s in nth ss (length ss - 2) end (* These are ignored anyway by the relevance filter (unless they appear in higher-order places) but not by the monomorphizer. *) val atp_logical_consts = [\<^const_name>\Pure.prop\, \<^const_name>\Pure.conjunction\, \<^const_name>\Pure.all\, \<^const_name>\Pure.imp\, \<^const_name>\Pure.eq\, \<^const_name>\Trueprop\, \<^const_name>\All\, \<^const_name>\Ex\, \<^const_name>\Ex1\, \<^const_name>\Ball\, \<^const_name>\Bex\] (* These are either simplified away by "Meson.presimplify" (most of the time) or handled specially via "fFalse", "fTrue", ..., "fequal". *) val atp_irrelevant_consts = [\<^const_name>\False\, \<^const_name>\True\, \<^const_name>\Not\, \<^const_name>\conj\, \<^const_name>\disj\, \<^const_name>\implies\, \<^const_name>\HOL.eq\, \<^const_name>\If\, \<^const_name>\Let\] val atp_widely_irrelevant_consts = atp_logical_consts @ atp_irrelevant_consts val atp_irrelevant_const_tab = Symtab.make (map (rpair ()) atp_irrelevant_consts) val atp_widely_irrelevant_const_tab = Symtab.make (map (rpair ()) atp_widely_irrelevant_consts) val is_irrelevant_const = Symtab.defined atp_irrelevant_const_tab val is_widely_irrelevant_const = Symtab.defined atp_widely_irrelevant_const_tab fun add_schematic_const (x as (_, T)) = Monomorph.typ_has_tvars T ? Symtab.insert_list (op =) x val add_schematic_consts_of = Term.fold_aterms (fn Const (x as (s, _)) => not (member (op =) atp_widely_irrelevant_consts s) ? 
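(* E.g., "is_irrelevant_const" holds of "HOL.conj", whereas "Pure.all" is merely widely
   irrelevant; schematic occurrences of all remaining constants are collected for
   monomorphization. *)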
add_schematic_const x | _ => I) fun atp_schematic_consts_of t = add_schematic_consts_of t Symtab.empty val tvar_a_str = "'a" val tvar_a_z = ((tvar_a_str, 0), \<^sort>\type\) val tvar_a = TVar tvar_a_z val tvar_a_name = tvar_name tvar_a_z val itself_name = `make_fixed_type_const \<^type_name>\itself\ val TYPE_name = `(make_fixed_const NONE) \<^const_name>\Pure.type\ val tvar_a_atype = AType ((tvar_a_name, []), []) val a_itself_atype = AType ((itself_name, []), [tvar_a_atype]) (** Definitions and functions for FOL clauses and formulas for TPTP **) (** Type class membership **) (* In our data structures, [] exceptionally refers to the top class, not to the empty class. *) val class_of_types = the_single \<^sort>\type\ fun normalize_classes cls = if member (op =) cls class_of_types then [] else cls (* Arity of type constructor "s :: (arg1, ..., argN) res" *) fun make_axiom_tcon_clause (s, name, (cl, args)) = let val args = args |> map normalize_classes val tvars = 1 upto length args |> map (fn j => TVar ((tvar_a_str, j), \<^sort>\type\)) in (name, args ~~ tvars, (cl, Type (s, tvars))) end (* Generate all pairs (tycon, class, sorts) such that tycon belongs to class in theory thy provided its arguments have the corresponding sorts. *) fun class_pairs thy tycons cls = let val alg = Sign.classes_of thy fun domain_sorts tycon = Sorts.mg_domain alg tycon o single fun add_class tycon cl = cons (cl, domain_sorts tycon cl) handle Sorts.CLASS_ERROR _ => I fun try_classes tycon = (tycon, fold (add_class tycon) cls []) in map try_classes tycons end (* Proving one (tycon, class) membership may require proving others, so iterate. *) fun all_class_pairs _ _ _ [] = ([], []) | all_class_pairs thy tycons old_cls cls = let val old_cls' = cls @ old_cls fun maybe_insert_class s = not (member (op =) old_cls' s) ? insert (op =) s val pairs = class_pairs thy tycons cls val new_cls = fold (fold (fold (fold maybe_insert_class) o snd) o snd) pairs [] val (cls', pairs') = all_class_pairs thy tycons old_cls' new_cls in (cls' @ cls, union (op =) pairs' pairs) end fun tcon_clause _ _ [] = [] | tcon_clause seen n ((_, []) :: rest) = tcon_clause seen n rest | tcon_clause seen n ((tcons, (ar as (cl, _)) :: ars) :: rest) = if cl = class_of_types then tcon_clause seen n ((tcons, ars) :: rest) else if member (op =) seen cl then (* multiple clauses for the same (tycon, cl) pair *) make_axiom_tcon_clause (tcons, lookup_const tcons ^ "___" ^ ascii_of cl ^ "_" ^ string_of_int n, ar) :: tcon_clause seen (n + 1) ((tcons, ars) :: rest) else make_axiom_tcon_clause (tcons, lookup_const tcons ^ "___" ^ ascii_of cl, ar) :: tcon_clause (cl :: seen) n ((tcons, ars) :: rest) fun make_tcon_clauses thy tycons = all_class_pairs thy tycons [] ##> tcon_clause [] 1 (** Isabelle class relations **) (* Generate a list ("sub", "supers") such that "sub" is a proper subclass of all "supers". 
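For example, under the usual HOL class hierarchy, subs = ["Orderings.linorder"] and supers = ["Orderings.order"] would yield [("Orderings.linorder", ["Orderings.order"])]; entries with no proper superclass among "supers" are dropped.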
*) fun make_subclass_pairs thy subs supers = let val class_less = curry (Sorts.class_less (Sign.classes_of thy)) fun supers_of sub = (sub, filter (class_less sub) supers) in map supers_of subs |> filter_out (null o snd) end (* intermediate terms *) (* TODO: Merge IConst and IVar *) datatype iterm = IConst of (string * string) * typ * typ list | IVar of (string * string) * typ | IApp of iterm * iterm | IAbs of ((string * string) * typ) * iterm fun alpha_rename from to = let fun traverse (tm as IConst (name, T, Ts)) = if name = from then IConst (to, T, Ts) else tm | traverse (tm as IVar (name, T)) = if name = from then IVar (to, T) else tm | traverse (tm as IApp (tm1, tm2)) = let val tm1' = traverse tm1 val tm2' = traverse tm2 in if tm1 = tm1' andalso tm2 = tm2' then tm else IApp (tm1', tm2') end | traverse (tm as IAbs (binding as (name, _), tm1)) = if name = from then tm else let val tm1' = traverse tm1 in if tm1 = tm1' then tm else IAbs (binding, tm1') end in traverse end fun ityp_of (IConst (_, T, _)) = T | ityp_of (IVar (_, T)) = T | ityp_of (IApp (t1, _)) = snd (dest_funT (ityp_of t1)) | ityp_of (IAbs ((_, T), tm)) = T --> ityp_of tm (*gets the head of a combinator application, along with the list of arguments*) fun strip_iterm_comb u = let fun stripc (IApp (t, u), ts) = stripc (t, u :: ts) | stripc x = x in stripc (u, []) end fun atomic_types_of T = fold_atyps (insert (op =)) T [] fun new_skolem_const_name s num_T_args = [new_skolem_const_prefix, s, string_of_int num_T_args] |> Long_Name.implode val alpha_to_beta = Logic.varifyT_global \<^typ>\'a => 'b\ val alpha_to_beta_to_alpha_to_beta = alpha_to_beta --> alpha_to_beta fun robust_const_type thy s = if s = app_op_name then alpha_to_beta_to_alpha_to_beta else if String.isPrefix lam_lifted_prefix s then alpha_to_beta else (* Old Skolems throw a "TYPE" exception here, which will be caught. *) s |> Sign.the_const_type thy fun ary_of (Type (\<^type_name>\fun\, [_, T])) = 1 + ary_of T | ary_of _ = 0 (* This function only makes sense if "T" is as general as possible. *) fun robust_const_type_args thy (s, T) = if s = app_op_name then let val (T1, T2) = T |> domain_type |> dest_funT in [T1, T2] end else if String.isPrefix old_skolem_const_prefix s then [] |> Term.add_tvarsT T |> rev |> map TVar else if String.isPrefix lam_lifted_prefix s then if String.isPrefix lam_lifted_poly_prefix s then let val (T1, T2) = T |> dest_funT in [T1, T2] end else [] else (s, T) |> Sign.const_typargs thy (* Converts an Isabelle/HOL term (with combinators) into an intermediate term. Also accumulates sort information.
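For example, an abstraction "%x. c x" becomes roughly IAbs ((("B_x", "x"), T), IApp (IConst _, IConst (("B_x", "x"), T, []))), the bound name receiving "bound_var_prefix".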
*) fun iterm_of_term thy type_enc = let fun iot true bs ((t0 as Const (\<^const_name>\Let\, _)) $ t1 $ (t2 as Abs (s, T, t'))) = let val (t0', t0_atomics_Ts) = iot true bs t0 val (t1', t1_atomics_Ts) = iot true bs t1 val (t2', t2_atomics_Ts) = iot true bs t2 in (IApp (IApp (t0', t1'), t2'), fold (union (op =)) [t0_atomics_Ts, t1_atomics_Ts, t2_atomics_Ts] []) end | iot true bs ((t0 as Const (\<^const_name>\Let\, _)) $ t1 $ t2) = iot true bs (t0 $ t1 $ eta_expand (map (snd o snd) bs) t2 1) | iot fool bs (P $ Q) = let val (P', P_atomics_Ts) = iot fool bs P val (Q', Q_atomics_Ts) = iot fool bs Q in (IApp (P', Q'), union (op =) P_atomics_Ts Q_atomics_Ts) end | iot _ _ (Const (c, T)) = (IConst (`(make_fixed_const (SOME type_enc)) c, T, robust_const_type_args thy (c, T)), atomic_types_of T) | iot _ _ (Free (s, T)) = (IConst (`make_fixed_var s, T, []), atomic_types_of T) | iot _ _ (Var (v as (s, _), T)) = (if String.isPrefix Meson_Clausify.new_skolem_var_prefix s then let val Ts = T |> strip_type |> swap |> op :: val s' = new_skolem_const_name s (length Ts) in IConst (`(make_fixed_const (SOME type_enc)) s', T, Ts) end else IVar ((make_schematic_var v, s), T), atomic_types_of T) | iot _ bs (Bound j) = nth bs j |> (fn (_, (name, T)) => (IConst (name, T, []), atomic_types_of T)) | iot fool bs (Abs (s, T, t)) = let fun vary s = s |> AList.defined (op =) bs s ? vary o Symbol.bump_string val s = vary s val name = `make_bound_var s val (tm, atomic_Ts) = iot fool ((s, (name, T)) :: bs) t in (IAbs ((name, T), tm), union (op =) atomic_Ts (atomic_types_of T)) end in iot (is_type_enc_fool type_enc) end (* "_query" and "_at" are for the ASCII-challenged Metis and Mirabelle. *) val queries = ["?", "_query"] val ats = ["@", "_at"] fun try_unsuffixes ss s = fold (fn s' => fn NONE => try (unsuffix s') s | some => some) ss NONE fun type_enc_of_string strictness s = let val (poly, s) = (case try (unprefix "tc_") s of SOME s => (SOME Type_Class_Polymorphic, s) | NONE => (case try (unprefix "poly_") s of SOME s => (SOME (Raw_Polymorphic With_Phantom_Type_Vars), s) | NONE => (case try (unprefix "ml_poly_") s of SOME s => (SOME (Raw_Polymorphic Without_Phantom_Type_Vars), s) | NONE => (case try (unprefix "raw_mono_") s of SOME s => (SOME Raw_Monomorphic, s) | NONE => (case try (unprefix "mono_") s of SOME s => (SOME Mangled_Monomorphic, s) | NONE => (NONE, s)))))) val (level, s) = case try_unsuffixes queries s of SOME s => (case try_unsuffixes queries s of SOME s => (Nonmono_Types (strictness, Non_Uniform), s) | NONE => (Nonmono_Types (strictness, Uniform), s)) | NONE => (case try_unsuffixes ats s of SOME s => (Undercover_Types, s) | NONE => (All_Types, s)) fun native_of_string s = let + val (_, s) = + (case try (unsuffix "_arith") s of + SOME s => (true, s) + | NONE => (false, s)) + val syntax = {with_ite = true, with_let = true} val (fool, core) = (case try (unsuffix "_fool") s of - SOME s => (With_FOOL, s) + SOME s => (With_FOOL syntax, s) | NONE => (Without_FOOL, s)) - val syntax = {with_ite = (fool = With_FOOL)} in (case (core, poly) of ("native", SOME poly) => (case (poly, level) of (Mangled_Monomorphic, _) => if is_type_level_uniform level then - Native (First_Order, fool, syntax, Mangled_Monomorphic, level) + Native (First_Order, fool, Mangled_Monomorphic, level) else raise Same.SAME | (Raw_Monomorphic, _) => raise Same.SAME - | (poly, All_Types) => Native (First_Order, fool, syntax, poly, All_Types)) + | (poly, All_Types) => Native (First_Order, fool, poly, All_Types)) | ("native_higher", SOME poly) => (case 
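(* Illustrative cases: "mono_native" parses to Native (First_Order, Without_FOOL,
   Mangled_Monomorphic, All_Types); "mono_native_fool" additionally enables
   With_FOOL {with_ite = true, with_let = true}; "poly_guards??" parses to
   Guards (Raw_Polymorphic With_Phantom_Type_Vars, Nonmono_Types (strictness, Non_Uniform)). *)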
(poly, level) of (_, Nonmono_Types _) => raise Same.SAME | (_, Undercover_Types) => raise Same.SAME | (Mangled_Monomorphic, _) => if is_type_level_uniform level then - Native (Higher_Order THF_With_Choice, fool, syntax, Mangled_Monomorphic, level) + Native (Higher_Order THF_With_Choice, With_FOOL syntax, Mangled_Monomorphic, level) else raise Same.SAME | (poly as Raw_Polymorphic _, All_Types) => - Native (Higher_Order THF_With_Choice, fool, syntax, poly, All_Types) + Native (Higher_Order THF_With_Choice, With_FOOL syntax, poly, All_Types) | _ => raise Same.SAME)) end fun nonnative_of_string core = (case (core, poly, level) of ("guards", SOME poly, _) => if (poly = Mangled_Monomorphic andalso level = Undercover_Types) orelse poly = Type_Class_Polymorphic then raise Same.SAME else Guards (poly, level) | ("tags", SOME poly, _) => if (poly = Mangled_Monomorphic andalso level = Undercover_Types) orelse poly = Type_Class_Polymorphic then raise Same.SAME else Tags (poly, level) | ("args", SOME poly, All_Types (* naja *)) => if poly = Type_Class_Polymorphic then raise Same.SAME else Guards (poly, Const_Types Without_Ctr_Optim) | ("args", SOME poly, Nonmono_Types (_, Uniform) (* naja *)) => if poly = Mangled_Monomorphic orelse poly = Type_Class_Polymorphic then raise Same.SAME else Guards (poly, Const_Types With_Ctr_Optim) | ("erased", NONE, All_Types (* naja *)) => Guards (Raw_Polymorphic With_Phantom_Type_Vars, No_Types) | _ => raise Same.SAME) in if String.isPrefix "native" s then native_of_string s else nonnative_of_string s end handle Same.SAME => error ("Unknown type encoding: " ^ quote s) fun min_hologic THF_Lambda_Free _ = THF_Lambda_Free | min_hologic _ THF_Lambda_Free = THF_Lambda_Free | min_hologic THF_Without_Choice _ = THF_Without_Choice | min_hologic _ THF_Without_Choice = THF_Without_Choice | min_hologic _ _ = THF_With_Choice fun adjust_hologic hologic (Higher_Order hologic') = Higher_Order (min_hologic hologic hologic') | adjust_hologic _ type_enc = type_enc -fun adjust_fool Without_FOOL _ = Without_FOOL - | adjust_fool _ fool = fool +fun adjust_syntax {with_ite = ite1, with_let = let1} {with_ite = ite2, with_let = let2} = + {with_ite = ite1 andalso ite2, with_let = let1 andalso let2} + +fun adjust_fool (With_FOOL syntax) (With_FOOL syntax') = With_FOOL (adjust_syntax syntax syntax') + | adjust_fool _ _ = Without_FOOL fun no_type_classes Type_Class_Polymorphic = Raw_Polymorphic With_Phantom_Type_Vars | no_type_classes poly = poly -fun adjust_type_enc (THF (fool, Polymorphic, hologic)) - (Native (order, fool', syntax, poly, level)) = - Native (adjust_hologic hologic order, adjust_fool fool fool', syntax, no_type_classes poly, - level) - | adjust_type_enc (THF (fool, Monomorphic, hologic)) (Native (order, fool', syntax, _, level)) = - Native (adjust_hologic hologic order, adjust_fool fool fool', syntax, Mangled_Monomorphic, +fun adjust_type_enc (THF (poly, syntax, hologic)) (Native (order, fool, poly', level)) = + Native (adjust_hologic hologic order, adjust_fool (With_FOOL syntax) fool, + (case poly of Polymorphic => no_type_classes poly' | Monomorphic => Mangled_Monomorphic), level) - | adjust_type_enc (TFF (fool, Monomorphic)) (Native (_, fool', syntax, _, level)) = - Native (First_Order, adjust_fool fool fool', syntax, Mangled_Monomorphic, level) - | adjust_type_enc (DFG Polymorphic) (Native (_, _, syntax, poly, level)) = - Native (First_Order, Without_FOOL, syntax, poly, level) - | adjust_type_enc (DFG Monomorphic) (Native (_, _, syntax, _, level)) = - Native (First_Order, 
Without_FOOL, syntax, Mangled_Monomorphic, level) - | adjust_type_enc (TFF (fool, _)) (Native (_, fool', syntax, poly, level)) = - Native (First_Order, adjust_fool fool fool', syntax, no_type_classes poly, level) - | adjust_type_enc format (Native (_, _, _, poly, level)) = + | adjust_type_enc (TFF (poly, fool)) (Native (_, fool', poly', level)) = + Native (First_Order, adjust_fool fool fool', + (case poly of Polymorphic => no_type_classes poly' | Monomorphic => Mangled_Monomorphic), + level) + | adjust_type_enc (DFG Polymorphic) (Native (_, _, poly, level)) = + Native (First_Order, Without_FOOL, poly, level) + | adjust_type_enc (DFG Monomorphic) (Native (_, _, _, level)) = + Native (First_Order, Without_FOOL, Mangled_Monomorphic, level) + | adjust_type_enc format (Native (_, _, poly, level)) = adjust_type_enc format (Guards (no_type_classes poly, level)) | adjust_type_enc CNF_UEQ (type_enc as Guards stuff) = (if is_type_enc_sound type_enc then Tags else Guards) stuff | adjust_type_enc _ type_enc = type_enc fun is_first_order_lambda_free t = (case t of \<^Const_>\Not for t1\ => is_first_order_lambda_free t1 | \<^Const_>\All _ for \Abs (_, _, t')\\ => is_first_order_lambda_free t' | \<^Const_>\All _ for t1\ => is_first_order_lambda_free t1 | \<^Const_>\Ex _ for \Abs (_, _, t')\\ => is_first_order_lambda_free t' | \<^Const_>\Ex _ for t1\ => is_first_order_lambda_free t1 | \<^Const_>\conj for t1 t2\ => is_first_order_lambda_free t1 andalso is_first_order_lambda_free t2 | \<^Const_>\disj for t1 t2\ => is_first_order_lambda_free t1 andalso is_first_order_lambda_free t2 | \<^Const_>\implies for t1 t2\ => is_first_order_lambda_free t1 andalso is_first_order_lambda_free t2 | \<^Const_>\HOL.eq \<^Type>\bool\ for t1 t2\ => is_first_order_lambda_free t1 andalso is_first_order_lambda_free t2 | _ => not (exists_subterm (fn Abs _ => true | _ => false) t)) fun simple_translate_lambdas eta_matters do_lambdas ctxt type_enc t = if not eta_matters andalso is_first_order_lambda_free t then t else let fun trans_first_order Ts t = (case t of \<^Const_>\Not for t1\ => \<^Const>\Not for \trans_first_order Ts t1\\ | (t0 as \<^Const_>\All _\) $ Abs (s, T, t') => t0 $ Abs (s, T, trans_first_order (T :: Ts) t') | (t0 as \<^Const_>\All _\) $ t1 => trans_first_order Ts (t0 $ eta_expand Ts t1 1) | (t0 as \<^Const_>\Ex _\) $ Abs (s, T, t') => t0 $ Abs (s, T, trans_first_order (T :: Ts) t') | (t0 as \<^Const_>\Ex _\) $ t1 => trans_first_order Ts (t0 $ eta_expand Ts t1 1) | (t0 as \<^Const_>\conj\) $ t1 $ t2 => t0 $ trans_first_order Ts t1 $ trans_first_order Ts t2 | (t0 as \<^Const_>\disj\) $ t1 $ t2 => t0 $ trans_first_order Ts t1 $ trans_first_order Ts t2 | (t0 as \<^Const_>\implies\) $ t1 $ t2 => t0 $ trans_first_order Ts t1 $ trans_first_order Ts t2 | (t0 as \<^Const_>\HOL.eq \<^Type>\bool\\) $ t1 $ t2 => t0 $ trans_first_order Ts t1 $ trans_first_order Ts t2 | _ => if not (exists_subterm (fn Abs _ => true | _ => false) t) then t else t |> Envir.eta_contract |> do_lambdas ctxt Ts) fun trans_fool Ts t = (case t of (t1 as Const (\<^const_name>\Let\, _)) $ t2 $ t3 => (case t3 of Abs (s3, T, t') => t1 $ trans_fool Ts t2 $ Abs (s3, T, trans_fool (T :: Ts) t') | _ => trans_fool Ts (t1 $ trans_fool Ts t2 $ eta_expand Ts t3 1)) | (t0 as Const (\<^const_name>\All\, _)) $ t1 => (case t1 of Abs (s, T, t') => t0 $ Abs (s, T, trans_fool (T :: Ts) t') | _ => trans_fool Ts (t0 $ eta_expand Ts t1 1)) | (t0 as Const (\<^const_name>\Ex\, _)) $ t1 => (case t1 of Abs (s, T, t') => t0 $ Abs (s, T, trans_fool (T :: Ts) t') | _ => trans_fool 
Ts (t0 $ eta_expand Ts t1 1)) | t1 $ t2 => trans_fool Ts t1 $ trans_fool Ts t2 | Abs _ => t |> Envir.eta_contract |> do_lambdas ctxt Ts | _ => t) val (t', ctxt') = yield_singleton (Variable.import_terms true) t ctxt in t' |> (if is_type_enc_fool type_enc then trans_fool else trans_first_order) [] |> singleton (Variable.export_terms ctxt' ctxt) end fun do_cheaply_conceal_lambdas Ts (t1 $ t2) = do_cheaply_conceal_lambdas Ts t1 $ do_cheaply_conceal_lambdas Ts t2 | do_cheaply_conceal_lambdas Ts (Abs (_, T, t)) = Const (lam_lifted_poly_prefix ^ serial_string (), T --> fastype_of1 (T :: Ts, t)) | do_cheaply_conceal_lambdas _ t = t fun concealed_bound_name j = atp_weak_prefix ^ string_of_int j fun conceal_bounds Ts t = subst_bounds (map (Free o apfst concealed_bound_name) (0 upto length Ts - 1 ~~ Ts), t) fun reveal_bounds Ts = subst_atomic (map (fn (j, T) => (Free (concealed_bound_name j, T), Bound j)) (0 upto length Ts - 1 ~~ Ts)) fun do_introduce_combinators ctxt Ts t = (t |> conceal_bounds Ts |> Thm.cterm_of ctxt |> Meson_Clausify.introduce_combinators_in_cterm ctxt |> Thm.prop_of |> Logic.dest_equals |> snd |> reveal_bounds Ts) (* A type variable of sort "{}" will make abstraction fail. *) handle THM _ => t |> do_cheaply_conceal_lambdas Ts val introduce_combinators = simple_translate_lambdas false do_introduce_combinators fun constify_lifted (t $ u) = constify_lifted t $ constify_lifted u | constify_lifted (Abs (s, T, t)) = Abs (s, T, constify_lifted t) | constify_lifted (Free (x as (s, _))) = (if String.isPrefix lam_lifted_prefix s then Const else Free) x | constify_lifted t = t fun is_binder true (Const (\<^const_name>\Let\, _) $ _) = true | is_binder _ t = Lambda_Lifting.is_quantifier t fun lift_lams_part_1 ctxt type_enc = map hol_close_form #> rpair ctxt #-> Lambda_Lifting.lift_lambdas (SOME ((if is_type_enc_polymorphic type_enc then lam_lifted_poly_prefix else lam_lifted_mono_prefix) ^ "_a")) (is_binder (is_type_enc_fool type_enc)) #> fst fun lift_lams_part_2 ctxt type_enc (facts, lifted) = (facts, lifted) (* Lambda-lifting sometimes leaves some lambdas around; we need some way to get rid of them *) |> apply2 (map (introduce_combinators ctxt type_enc)) |> apply2 (map constify_lifted) (* Requires bound variables not to clash with any schematic variables (as should be the case right after lambda-lifting). *) |>> map (hol_open_form (unprefix hol_close_form_prefix)) ||> map (hol_open_form I) fun lift_lams ctxt type_enc = (is_type_enc_fool type_enc ? 
map (simple_translate_lambdas true (fn _ => fn _ => fn t => t) ctxt type_enc)) #> lift_lams_part_1 ctxt type_enc #> lift_lams_part_2 ctxt type_enc fun intentionalize_def (Const (\<^const_name>\All\, _) $ Abs (_, _, t)) = intentionalize_def t | intentionalize_def (Const (\<^const_name>\HOL.eq\, _) $ t $ u) = let fun lam T t = Abs (Name.uu, T, t) val (head, args) = strip_comb t ||> rev val head_T = fastype_of head val n = length args val arg_Ts = head_T |> binder_types |> take n |> rev val u = u |> subst_atomic (args ~~ map Bound (0 upto n - 1)) in HOLogic.eq_const head_T $ head $ fold lam arg_Ts u end | intentionalize_def t = t type ifact = {name : string, stature : stature, role : atp_formula_role, iformula : (string * string, typ, iterm, string * string) atp_formula, atomic_types : typ list} fun update_iformula f ({name, stature, role, iformula, atomic_types} : ifact) = {name = name, stature = stature, role = role, iformula = f iformula, atomic_types = atomic_types} : ifact fun ifact_lift f ({iformula, ...} : ifact) = f iformula fun insert_type thy get_T x xs = let val T = get_T x in if exists (type_instance thy T o get_T) xs then xs else x :: filter_out (type_generalization thy T o get_T) xs end fun chop_fun 0 T = ([], T) | chop_fun n (Type (\<^type_name>\fun\, [dom_T, ran_T])) = chop_fun (n - 1) ran_T |>> cons dom_T | chop_fun _ T = ([], T) fun filter_type_args thy ctrss type_enc s ary T_args = let val poly = polymorphism_of_type_enc type_enc in if s = type_tag_name then (* FIXME: why not "type_guard_name" as well? *) T_args else (case type_enc of - Native (_, _, _, Raw_Polymorphic _, _) => T_args - | Native (_, _, _, Type_Class_Polymorphic, _) => T_args + Native (_, _, Raw_Polymorphic _, _) => T_args + | Native (_, _, Type_Class_Polymorphic, _) => T_args | _ => let fun gen_type_args _ _ [] = [] | gen_type_args keep strip_ty T_args = let val U = robust_const_type thy s val (binder_Us, body_U) = strip_ty U val in_U_vars = fold Term.add_tvarsT binder_Us [] val out_U_vars = Term.add_tvarsT body_U [] fun filt (U_var, T) = if keep (member (op =) in_U_vars U_var, member (op =) out_U_vars U_var) then T else dummyT val U_args = (s, U) |> robust_const_type_args thy in map (filt o apfst dest_TVar) (U_args ~~ T_args) end handle TYPE _ => T_args fun is_always_ctr (s', T') = s' = s andalso type_equiv thy (T', robust_const_type thy s') val noninfer_type_args = gen_type_args (not o fst) (chop_fun ary) val ctr_infer_type_args = gen_type_args fst strip_type val level = level_of_type_enc type_enc in if level = No_Types orelse s = \<^const_name>\HOL.eq\ orelse (case level of Const_Types _ => s = app_op_name | _ => false) then [] else if poly = Mangled_Monomorphic then T_args else if level = All_Types then (case type_enc of Guards _ => noninfer_type_args T_args | Tags _ => []) else if level = Undercover_Types then noninfer_type_args T_args else if level <> Const_Types Without_Ctr_Optim andalso exists (exists is_always_ctr) ctrss then ctr_infer_type_args T_args else T_args end) end fun raw_atp_type_of_typ type_enc = let fun term (Type (s, Ts)) = AType ((if s = \<^type_name>\fun\ andalso is_type_enc_higher_order type_enc then `I tptp_fun_type else if s = \<^type_name>\bool\ andalso (is_type_enc_full_higher_order type_enc orelse is_type_enc_fool type_enc) then `I tptp_bool_type else `make_fixed_type_const s, []), map term Ts) | term (TFree (s, _)) = AType ((`make_tfree s, []), []) | term (TVar z) = AType ((tvar_name z, []), []) in term end fun atp_term_of_atp_type (AType ((name, _), tys)) = ATerm ((name, []), 
map atp_term_of_atp_type tys) | atp_term_of_atp_type _ = raise Fail "unexpected type" fun atp_type_of_type_arg type_enc T = if T = dummyT then NONE else SOME (raw_atp_type_of_typ type_enc T) (* This shouldn't clash with anything else. *) val uncurried_alias_sep = "\000" val mangled_type_sep = "\001" val ascii_of_uncurried_alias_sep = ascii_of uncurried_alias_sep fun generic_mangled_type_name f (AType ((name, _), [])) = f name | generic_mangled_type_name f (AType ((name, _), tys)) = f name ^ "(" ^ space_implode "," (map (generic_mangled_type_name f) tys) ^ ")" | generic_mangled_type_name _ _ = raise Fail "unexpected type" fun mangled_type type_enc = generic_mangled_type_name fst o raw_atp_type_of_typ type_enc fun make_native_type s = if s = tptp_bool_type orelse s = tptp_fun_type orelse s = tptp_individual_type then s else native_type_prefix ^ ascii_of s fun native_atp_type_of_raw_atp_type type_enc pred_sym ary = let fun to_mangled_atype ty = AType (((make_native_type (generic_mangled_type_name fst ty), generic_mangled_type_name snd ty), []), []) fun to_poly_atype (AType ((name, clss), tys)) = AType ((name, clss), map to_poly_atype tys) | to_poly_atype _ = raise Fail "unexpected type" val to_atype = if is_type_enc_polymorphic type_enc then to_poly_atype else to_mangled_atype fun to_afun f1 f2 tys = AFun (f1 (hd tys), f2 (nth tys 1)) fun to_ho (ty as AType (((s, _), _), tys)) = if s = tptp_fun_type then to_afun to_ho to_ho tys else to_atype ty | to_ho _ = raise Fail "unexpected type" fun to_lfho (ty as AType (((s, _), _), tys)) = if s = tptp_fun_type then to_afun to_ho to_lfho tys else if pred_sym then bool_atype else to_atype ty | to_lfho _ = raise Fail "unexpected type" fun to_fo 0 ty = if pred_sym then bool_atype else to_atype ty | to_fo ary (AType (_, tys)) = to_afun to_atype (to_fo (ary - 1)) tys | to_fo _ _ = raise Fail "unexpected type" in if is_type_enc_full_higher_order type_enc then to_ho else if is_type_enc_higher_order type_enc then to_lfho else to_fo ary end fun native_atp_type_of_typ type_enc pred_sym ary = native_atp_type_of_raw_atp_type type_enc pred_sym ary o raw_atp_type_of_typ type_enc (* Make atoms for sorted type variables. *) fun generic_add_sorts_on_type _ [] = I | generic_add_sorts_on_type T (s :: ss) = generic_add_sorts_on_type T ss #> (if s = the_single \<^sort>\type\ then I else insert (op =) (s, T)) fun add_sorts_on_tfree (T as TFree (_, S)) = generic_add_sorts_on_type T S | add_sorts_on_tfree _ = I fun add_sorts_on_tvar (T as TVar (_, S)) = generic_add_sorts_on_type T S | add_sorts_on_tvar _ = I fun process_type_args type_enc T_args = if is_type_enc_native type_enc then (map (native_atp_type_of_typ type_enc false 0) T_args, []) else ([], map_filter (Option.map atp_term_of_atp_type o atp_type_of_type_arg type_enc) T_args) fun class_atom type_enc (cl, T) = let val cl = `make_class cl val (ty_args, tm_args) = process_type_args type_enc [T] val tm_args = tm_args @ (case type_enc of - Native (_, _, _, Raw_Polymorphic Without_Phantom_Type_Vars, _) => + Native (_, _, Raw_Polymorphic Without_Phantom_Type_Vars, _) => [ATerm ((TYPE_name, ty_args), [])] | _ => []) in AAtom (ATerm ((cl, ty_args), tm_args)) end fun class_atoms type_enc (cls, T) = map (fn cl => class_atom type_enc (cl, T)) cls fun class_membs_of_types type_enc add_sorts_on_typ Ts = [] |> level_of_type_enc type_enc <> No_Types ? 
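(* In the non-native encodings, each constraint collected here ultimately becomes a
   class-membership atom such as "cl_Orderings_Oorder(T_a)" for a type variable 'a of sort
   "order" (illustrative). *)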
fold add_sorts_on_typ Ts fun mk_aconns c = split_last #> uncurry (fold_rev (mk_aconn c)) fun mk_ahorn [] phi = phi | mk_ahorn phis psi = AConn (AImplies, [mk_aconns AAnd phis, psi]) fun mk_aquant _ [] phi = phi | mk_aquant q xs (phi as AQuant (q', xs', phi')) = if q = q' then AQuant (q, xs @ xs', phi') else AQuant (q, xs, phi) | mk_aquant q xs phi = AQuant (q, xs, phi) fun mk_atyquant _ [] phi = phi | mk_atyquant q xs (phi as ATyQuant (q', xs', phi')) = if q = q' then ATyQuant (q, xs @ xs', phi') else ATyQuant (q, xs, phi) | mk_atyquant q xs phi = ATyQuant (q, xs, phi) fun close_universally add_term_vars phi = let fun add_formula_vars bounds (ATyQuant (_, _, phi)) = add_formula_vars bounds phi | add_formula_vars bounds (AQuant (_, xs, phi)) = add_formula_vars (map fst xs @ bounds) phi | add_formula_vars bounds (AConn (_, phis)) = fold (add_formula_vars bounds) phis | add_formula_vars bounds (AAtom tm) = add_term_vars bounds tm in mk_aquant AForall (rev (add_formula_vars [] phi [])) phi end fun add_term_vars bounds (ATerm ((name as (s, _), _), tms)) = (if is_tptp_variable s andalso not (String.isPrefix tvar_prefix s) andalso not (member (op =) bounds name) then insert (op =) (name, NONE) else I) #> fold (add_term_vars bounds) tms | add_term_vars bounds (AAbs (((name, _), tm), args)) = add_term_vars (name :: bounds) tm #> fold (add_term_vars bounds) args fun close_formula_universally phi = close_universally add_term_vars phi fun add_iterm_vars bounds (IApp (tm1, tm2)) = fold (add_iterm_vars bounds) [tm1, tm2] | add_iterm_vars _ (IConst _) = I | add_iterm_vars bounds (IVar (name, T)) = not (member (op =) bounds name) ? insert (op =) (name, SOME T) | add_iterm_vars bounds (IAbs (_, tm)) = add_iterm_vars bounds tm fun aliased_uncurried ary (s, s') = (s ^ ascii_of_uncurried_alias_sep ^ string_of_int ary, s' ^ string_of_int ary) fun unaliased_uncurried (s, s') = (case space_explode uncurried_alias_sep s of [_] => (s, s') | [s1, s2] => (s1, unsuffix s2 s') | _ => raise Fail "ill-formed explicit application alias") fun raw_mangled_const_name type_name ty_args (s, s') = let fun type_suffix f g = fold_rev (prefix o g o prefix mangled_type_sep o type_name f) ty_args "" in (s ^ type_suffix fst ascii_of, s' ^ type_suffix snd I) end fun mangled_const_name type_enc = map_filter (atp_type_of_type_arg type_enc) #> raw_mangled_const_name generic_mangled_type_name val parse_mangled_ident = Scan.many1 (not o member (op =) ["(", ")", ","]) >> implode fun parse_mangled_type x = (parse_mangled_ident -- Scan.optional ($$ "(" |-- Scan.optional parse_mangled_types [] --| $$ ")") [] >> (ATerm o apfst (rpair []))) x and parse_mangled_types x = (parse_mangled_type ::: Scan.repeat ($$ "," |-- parse_mangled_type)) x fun unmangled_type s = s |> suffix ")" |> raw_explode |> Scan.finite Symbol.stopper (Scan.error (!! 
(fn _ => raise Fail ("unrecognized mangled type " ^ quote s)) parse_mangled_type)) |> fst fun unmangled_const_name s = (s, s) |> unaliased_uncurried |> fst |> space_explode mangled_type_sep fun unmangled_const s = let val ss = unmangled_const_name s in (hd ss, map unmangled_type (tl ss)) end val unmangled_invert_const = invert_const o hd o unmangled_const_name -fun vars_of_iterm vars (IConst ((s, _), _, _)) = insert (op =) s vars - | vars_of_iterm vars (IVar ((s, _), _)) = insert (op =) s vars - | vars_of_iterm vars (IApp (tm1, tm2)) = union (op =) (vars_of_iterm vars tm1) (vars_of_iterm vars tm2) - | vars_of_iterm vars (IAbs (_, tm)) = vars_of_iterm vars tm +fun vars_of_iterm (IConst ((s, _), _, _)) = [s] + | vars_of_iterm (IVar ((s, _), _)) = [s] + | vars_of_iterm (IApp (tm1, tm2)) = union (op =) (vars_of_iterm tm1) (vars_of_iterm tm2) + | vars_of_iterm (IAbs (((s, _), _), tm)) = insert (op =) s (vars_of_iterm tm) fun generate_unique_name gen unique n = let val x = gen n in if unique x then x else generate_unique_name gen unique (n + 1) end fun eta_expand_quantifier_body (tm as IAbs _) = tm | eta_expand_quantifier_body tm = let (* We accumulate all variables because E 2.5 does not support variable shadowing. *) - val vars = vars_of_iterm [] tm + val vars = vars_of_iterm tm val x = generate_unique_name (fn n => "X" ^ (if n = 0 then "" else string_of_int n)) (fn name => not (exists (equal name) vars)) 0 |> `(prefix bound_var_prefix) val T = domain_type (ityp_of tm) in IAbs ((x, T), IApp (tm, IConst (x, T, []))) end fun introduce_builtins_and_proxies_in_iterm type_enc = let val is_fool = is_type_enc_fool type_enc val has_ite = has_type_enc_ite type_enc + val has_let = has_type_enc_let type_enc + val has_choice = has_type_enc_choice type_enc fun tweak_ho_quant ho_quant (T as Type (_, [p_T as Type (_, [x_T, _]), _])) [] = (* Eta-expand "!!" and "??", to work around LEO-II, Leo-III, and Satallax parser limitations. This works in conjunction with special code in "ATP_Problem" that uses the syntactic sugar "!" and "?" whenever possible. *) IAbs ((`I "P", p_T), IApp (IConst (`I ho_quant, T, []), IAbs ((`I "X", x_T), IApp (IConst (`I "P", p_T, []), IConst (`I "X", x_T, []))))) | tweak_ho_quant ho_quant T _ = IConst (`I ho_quant, T, []) fun tweak_ho_equal T argc = if argc = 2 then IConst (`I tptp_equal, T, []) else (* Eta-expand partially applied THF equality, because the LEO-II and Satallax parsers complain about not being able to infer the type of "=". 
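For example, with "T = 'a => 'a => bool" and argc = 1, the partially applied "(=) t" becomes "(%Y Z. Y = Z) t": the explicit binders built below give "Y" and "Z" their types, so nothing is left for the parsers to infer.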
*) let val i_T = domain_type T in IAbs ((`I "Y", i_T), IAbs ((`I "Z", i_T), IApp (IApp (IConst (`I tptp_equal, T, []), IConst (`I "Y", i_T, [])), IConst (`I "Z", i_T, [])))) end fun intro top_level args (IApp (tm1, tm2)) = let val tm1' = intro top_level (tm2 :: args) tm1 val tm2' = intro false [] tm2 val tm2'' = (case tm1' of IApp (IConst ((s, _), _, _), _) => if s = tptp_let then (case tm2' of IAbs ((name, T), tm) => let val name' = map_prod (prefix let_bound_var_prefix o unprefix bound_var_prefix) I name in IAbs ((name', T), alpha_rename name name' tm) end | _ => error "Function abstraction expected") else tm2' | IConst ((s, _), _, _) => - if s = tptp_ho_forall orelse s = tptp_ho_exists then + if s = tptp_ho_forall orelse s = tptp_ho_exists orelse s = tptp_choice then eta_expand_quantifier_body tm2' else tm2' | _ => tm2') in IApp (tm1', tm2'') end | intro top_level args (IConst (name as (s, _), T, T_args)) = let val argc = length args in if has_ite andalso s = "c_If" andalso argc >= 3 then IConst (`I tptp_ite, T, []) - else if is_fool andalso s = "c_Let" andalso argc >= 2 then + else if has_let andalso s = "c_Let" andalso argc >= 2 then IConst (`I tptp_let, T, []) else (case proxify_const s of SOME proxy_base => let fun plain_const () = IConst (name, T, []) fun proxy_const () = IConst (proxy_base |>> prefix const_prefix, T, T_args) fun handle_fool card x = if card = argc then x else proxy_const () + fun handle_min_card card x = if argc < card then proxy_const () else x in if top_level then (case s of "c_False" => IConst (`I tptp_false, T, []) | "c_True" => IConst (`I tptp_true, T, []) | _ => plain_const ()) else if is_type_enc_full_higher_order type_enc then (case s of "c_False" => IConst (`I tptp_false, T, []) | "c_True" => IConst (`I tptp_true, T, []) | "c_Not" => IConst (`I tptp_not, T, []) | "c_conj" => IConst (`I tptp_and, T, []) | "c_disj" => IConst (`I tptp_or, T, []) | "c_implies" => IConst (`I tptp_implies, T, []) | "c_All" => tweak_ho_quant tptp_ho_forall T args | "c_Ex" => tweak_ho_quant tptp_ho_exists T args + | "c_Choice" => + if has_choice then + handle_min_card 1 (IConst (`I tptp_choice, T, [])) + else + proxy_const () | s => if is_tptp_equal s then tweak_ho_equal T argc else plain_const ()) else if is_fool then (case s of "c_False" => IConst (`I tptp_false, T, []) | "c_True" => IConst (`I tptp_true, T, []) | "c_Not" => handle_fool 1 (IConst (`I tptp_not, T, [])) | "c_conj" => handle_fool 2 (IConst (`I tptp_and, T, [])) | "c_disj" => handle_fool 2 (IConst (`I tptp_or, T, [])) | "c_implies" => handle_fool 2 (IConst (`I tptp_implies, T, [])) | "c_All" => handle_fool 1 (tweak_ho_quant tptp_ho_forall T args) | "c_Ex" => handle_fool 1 (tweak_ho_quant tptp_ho_exists T args) + | "c_Choice" => proxy_const () | s => if is_tptp_equal s then handle_fool 2 (IConst (`I tptp_equal, T, [])) else plain_const ()) else proxy_const () end | NONE => if s = tptp_choice then tweak_ho_quant tptp_choice T args else IConst (name, T, T_args)) end | intro _ _ (IAbs (bound, tm)) = IAbs (bound, intro false [] tm) | intro _ _ tm = tm in intro true [] end fun mangle_type_args_in_const type_enc (name as (s, _)) T_args = if String.isPrefix const_prefix s andalso is_type_enc_mangling type_enc then (mangled_const_name type_enc T_args name, []) else (name, T_args) fun mangle_type_args_in_iterm type_enc = if is_type_enc_mangling type_enc then let fun mangle (IApp (tm1, tm2)) = IApp (mangle tm1, mangle tm2) | mangle (tm as IConst (_, _, [])) = tm | mangle (IConst (name, T, T_args)) = mangle_type_args_in_const 
type_enc name T_args |> (fn (name, T_args) => IConst (name, T, T_args)) | mangle (IAbs (bound, tm)) = IAbs (bound, mangle tm) | mangle tm = tm in mangle end else I fun filter_type_args_in_const _ _ _ _ _ [] = [] | filter_type_args_in_const thy ctrss type_enc ary s T_args = (case unprefix_and_unascii const_prefix s of NONE => if level_of_type_enc type_enc = No_Types orelse s = tptp_choice then [] else T_args | SOME s'' => filter_type_args thy ctrss type_enc (unmangled_invert_const s'') ary T_args) fun filter_type_args_in_iterm thy ctrss type_enc = let fun filt ary (IApp (tm1, tm2)) = IApp (filt (ary + 1) tm1, filt 0 tm2) | filt ary (IConst (name as (s, _), T, T_args)) = filter_type_args_in_const thy ctrss type_enc ary s T_args |> (fn T_args => IConst (name, T, T_args)) | filt _ (IAbs (bound, tm)) = IAbs (bound, filt 0 tm) | filt _ tm = tm in filt 0 end fun iformula_of_prop ctxt type_enc iff_for_eq = let val thy = Proof_Context.theory_of ctxt fun do_term bs t atomic_Ts = iterm_of_term thy type_enc bs (Envir.eta_contract t) |>> (introduce_builtins_and_proxies_in_iterm type_enc #> mangle_type_args_in_iterm type_enc #> AAtom) ||> union (op =) atomic_Ts fun do_quant bs q pos s T t' = let val s = singleton (Name.variant_list (map fst bs)) s val universal = Option.map (q = AExists ? not) pos val name = s |> `(case universal of SOME true => make_all_bound_var | SOME false => make_exist_bound_var | NONE => make_bound_var) in do_formula ((s, (name, T)) :: bs) pos t' #>> mk_aquant q [(name, SOME T)] ##> union (op =) (atomic_types_of T) end and do_conn bs c pos1 t1 pos2 t2 = do_formula bs pos1 t1 ##>> do_formula bs pos2 t2 #>> uncurry (mk_aconn c) and do_formula bs pos t = (case t of \<^Const_>\Trueprop for t1\ => do_formula bs pos t1 | \<^Const_>\Not for t1\ => do_formula bs (Option.map not pos) t1 #>> mk_anot | \<^Const_>\All _ for \Abs (s, T, t')\\ => do_quant bs AForall pos s T t' | (t0 as \<^Const_>\All _\) $ t1 => do_formula bs pos (t0 $ eta_expand (map (snd o snd) bs) t1 1) | \<^Const_>\Ex _ for \Abs (s, T, t')\\ => do_quant bs AExists pos s T t' | (t0 as \<^Const_>\Ex _\) $ t1 => do_formula bs pos (t0 $ eta_expand (map (snd o snd) bs) t1 1) | \<^Const_>\conj for t1 t2\ => do_conn bs AAnd pos t1 pos t2 | \<^Const_>\disj for t1 t2\ => do_conn bs AOr pos t1 pos t2 | \<^Const_>\implies for t1 t2\ => do_conn bs AImplies (Option.map not pos) t1 pos t2 | \<^Const_>\HOL.eq \<^Type>\bool\ for t1 t2\ => if iff_for_eq then do_conn bs AIff NONE t1 NONE t2 else do_term bs t | _ => do_term bs t) in do_formula [] end fun presimplify_term simp_options ctxt t = if exists_Const (member (op =) Meson.presimplified_consts o fst) t then t |> Skip_Proof.make_thm (Proof_Context.theory_of ctxt) |> Meson.presimplify simp_options ctxt |> Thm.prop_of else t fun preprocess_abstractions_in_terms trans_lams facts = let val (facts, lambda_ts) = facts |> map (snd o snd) |> trans_lams |>> map2 (fn (name, (role, _)) => fn t => (name, (role, t))) facts val lam_facts = map2 (fn t => fn j => ((lam_fact_prefix ^ Int.toString j, (Global, Non_Rec_Def)), (Axiom, t))) lambda_ts (1 upto length lambda_ts) in (facts, lam_facts) end (* Metis's use of "resolve_tac" freezes the schematic variables. We simulate this in Sledgehammer to prevent the discovery of unreplayable proofs. *) fun freeze_term t = let (* Freshness is desirable for completeness, but not for soundness. 
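For example, "Var (("x", 1), T)" is frozen to "Free ("x_1" ^ atp_weak_suffix, T)" (see "indexed_name" below), so distinct schematic variables become distinct free variables that can no longer be instantiated during replay.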
*) fun indexed_name (s, i) = s ^ "_" ^ string_of_int i ^ atp_weak_suffix fun freeze (t $ u) = freeze t $ freeze u | freeze (Abs (s, T, t)) = Abs (s, T, freeze t) | freeze (Var (x, T)) = Free (indexed_name x, T) | freeze t = t fun freeze_tvar (x, S) = TFree (indexed_name x, S) in t |> exists_subterm is_Var t ? freeze |> exists_type (exists_subtype is_TVar) t ? map_types (map_type_tvar freeze_tvar) end fun presimp_prop simp_options ctxt type_enc t = let val t = t |> Envir.beta_eta_contract |> transform_elim_prop |> Object_Logic.atomize_term ctxt val need_trueprop = (fastype_of t = \<^typ>\bool\) val is_ho = is_type_enc_full_higher_order type_enc in t |> need_trueprop ? HOLogic.mk_Trueprop |> (if is_ho then unextensionalize_def else cong_extensionalize_term ctxt #> abs_extensionalize_term ctxt) |> presimplify_term simp_options ctxt |> HOLogic.dest_Trueprop end handle TERM _ => \<^Const>\True\ (* Satallax prefers "=" to "<=>" (for definitions) and Metis (CNF) requires "=" for technical reasons. *) fun should_use_iff_for_eq CNF _ = false | should_use_iff_for_eq (THF _) format = not (is_type_enc_full_higher_order format) | should_use_iff_for_eq _ _ = true fun make_formula ctxt format type_enc iff_for_eq name stature role t = let val iff_for_eq = iff_for_eq andalso should_use_iff_for_eq format type_enc val (iformula, atomic_Ts) = iformula_of_prop ctxt type_enc iff_for_eq (SOME (role <> Conjecture)) t [] |>> close_universally add_iterm_vars in {name = name, stature = stature, role = role, iformula = iformula, atomic_types = atomic_Ts} end fun make_fact ctxt format type_enc iff_for_eq ((name, stature), t) = (case make_formula ctxt format type_enc iff_for_eq name stature Axiom t of formula as {iformula = AAtom (IConst ((s, _), _, _)), ...} => if s = tptp_true then NONE else SOME formula | formula => SOME formula) fun make_conjecture ctxt format type_enc = map (fn ((name, stature), (role, t)) => let val t' = t |> role = Conjecture ? s_not in make_formula ctxt format type_enc true name stature role t' end) (** Finite and infinite type inference **) fun tvar_footprint thy s ary = (case unprefix_and_unascii const_prefix s of SOME s => let fun tvars_of T = [] |> Term.add_tvarsT T |> map fst in s |> unmangled_invert_const |> robust_const_type thy |> chop_fun ary |> fst |> map tvars_of end | NONE => []) handle TYPE _ => [] fun type_arg_cover thy pos s ary = if is_tptp_equal s then if pos = SOME false then [] else 0 upto ary - 1 else let val footprint = tvar_footprint thy s ary val eq = (s = \<^const_name>\HOL.eq\) fun cover _ [] = [] | cover seen ((i, tvars) :: args) = cover (union (op =) seen tvars) args |> (eq orelse exists (fn tvar => not (member (op =) seen tvar)) tvars) ? cons i in if forall null footprint then [] else 0 upto length footprint - 1 ~~ footprint |> sort (rev_order o list_ord Term_Ord.indexname_ord o apply2 snd) |> cover [] end type monotonicity_info = {maybe_finite_Ts : typ list, surely_infinite_Ts : typ list, maybe_nonmono_Ts : typ list} (* These types witness that the type classes they belong to allow infinite models and hence that any type with these type classes is monotonic. *) val known_infinite_types = [\<^typ>\nat\, HOLogic.intT, HOLogic.realT, \<^typ>\nat => bool\] fun is_type_kind_of_surely_infinite ctxt strictness cached_Ts T = strictness <> Strict andalso is_type_surely_infinite ctxt true cached_Ts T (* Finite types such as "unit", "bool", "bool * bool", and "bool => bool" are dangerous because their "exhaust" properties can easily lead to unsound ATP proofs.
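For example, once types are erased, the "bool" exhaustion fact "ALL x. x = True | x = False" applies to terms of every type; combined with any three pairwise distinct terms, it yields a contradiction by the pigeonhole principle.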
On the other hand, all HOL infinite types can be given the same models in first-order logic (via Loewenheim-Skolem). *) fun should_encode_type ctxt {maybe_finite_Ts, surely_infinite_Ts, maybe_nonmono_Ts} (Nonmono_Types (strictness, _)) T = let val thy = Proof_Context.theory_of ctxt in (exists (type_intersect thy T) maybe_nonmono_Ts andalso not (exists (type_instance thy T) surely_infinite_Ts orelse (not (member (type_equiv thy) maybe_finite_Ts T) andalso is_type_kind_of_surely_infinite ctxt strictness surely_infinite_Ts T))) end | should_encode_type _ _ level _ = (level = All_Types orelse level = Undercover_Types) fun should_guard_type ctxt mono (Guards (_, level)) should_guard_var T = should_guard_var () andalso should_encode_type ctxt mono level T | should_guard_type _ _ _ _ _ = false fun is_maybe_universal_name s = String.isPrefix bound_var_prefix s orelse String.isPrefix all_bound_var_prefix s fun is_maybe_universal_var (IConst ((s, _), _, _)) = is_maybe_universal_name s | is_maybe_universal_var (IVar _) = true | is_maybe_universal_var _ = false datatype site = Top_Level of bool option | Eq_Arg of bool option | Arg of string * int * int | Elsewhere fun should_tag_with_type _ _ _ (Top_Level _) _ _ = false | should_tag_with_type ctxt mono (Tags (_, level)) site u T = let val thy = Proof_Context.theory_of ctxt in (case level of Nonmono_Types (_, Non_Uniform) => (case (site, is_maybe_universal_var u) of (Eq_Arg pos, true) => (pos <> SOME false orelse tag_neg_vars) andalso should_encode_type ctxt mono level T | _ => false) | Undercover_Types => (case (site, is_maybe_universal_var u) of (Eq_Arg pos, true) => pos <> SOME false | (Arg (s, j, ary), true) => member (op =) (type_arg_cover thy NONE s ary) j | _ => false) | _ => should_encode_type ctxt mono level T) end | should_tag_with_type _ _ _ _ _ _ = false (** predicators and application operators **) type sym_info = {pred_sym : bool, min_ary : int, max_ary : int, types : typ list, in_conj : bool} fun default_sym_tab_entries type_enc = let fun mk_sym_info pred n = {pred_sym = pred, min_ary = n, max_ary = n, types = [], in_conj = false} in (make_fixed_const NONE \<^const_name>\undefined\, mk_sym_info false 0) :: (map (apsnd (fn {arity, is_predicate} => mk_sym_info is_predicate arity)) (Symtab.dest tptp_builtins)) |> not (is_type_enc_fool type_enc orelse is_type_enc_full_higher_order type_enc) ? 
cons (prefixed_predicator_name, mk_sym_info true 1) end datatype app_op_level = Min_App_Op | Sufficient_App_Op | Sufficient_App_Op_And_Predicator | Full_App_Op_And_Predicator fun add_iterm_syms_to_sym_table ctxt app_op_level conj_fact = let val thy = Proof_Context.theory_of ctxt fun consider_var_ary const_T var_T max_ary = let fun iter ary T = - if ary = max_ary orelse type_instance thy var_T T orelse type_instance thy T var_T then + if ary = max_ary orelse type_instance thy var_T T orelse type_instance thy T var_T orelse + not (can dest_funT T) then ary else iter (ary + 1) (range_type T) in iter 0 const_T end fun add_universal_var T (accum as ((bool_vars, fun_var_Ts), sym_tab)) = if (app_op_level = Sufficient_App_Op andalso can dest_funT T) orelse (app_op_level = Sufficient_App_Op_And_Predicator andalso (can dest_funT T orelse T = \<^typ>\bool\)) then let val bool_vars' = bool_vars orelse (app_op_level = Sufficient_App_Op_And_Predicator andalso body_type T = \<^typ>\bool\) fun repair_min_ary {pred_sym, min_ary, max_ary, types, in_conj} = {pred_sym = pred_sym andalso not bool_vars', min_ary = fold (fn T' => consider_var_ary T' T) types min_ary, max_ary = max_ary, types = types, in_conj = in_conj} val fun_var_Ts' = fun_var_Ts |> can dest_funT T ? insert_type thy I T in if bool_vars' = bool_vars andalso fun_var_Ts' = fun_var_Ts then accum else ((bool_vars', fun_var_Ts'), Symtab.map (K repair_min_ary) sym_tab) end else accum fun add_iterm_syms top_level tm (accum as ((bool_vars, fun_var_Ts), sym_tab)) = let val (head, args) = strip_iterm_comb tm in (case head of IConst ((s, _), T, _) => if is_maybe_universal_name s then add_universal_var T accum else if String.isPrefix exist_bound_var_prefix s then accum else let val ary = length args in ((bool_vars, fun_var_Ts), (case Symtab.lookup sym_tab s of SOME {pred_sym, min_ary, max_ary, types, in_conj} => let val pred_sym = pred_sym andalso top_level andalso not bool_vars val types' = types |> insert_type thy I T val in_conj = in_conj orelse conj_fact val min_ary = if (app_op_level = Sufficient_App_Op orelse app_op_level = Sufficient_App_Op_And_Predicator) andalso types' <> types then fold (consider_var_ary T) fun_var_Ts min_ary else min_ary in Symtab.update (s, {pred_sym = pred_sym, min_ary = Int.min (ary, min_ary), max_ary = Int.max (ary, max_ary), types = types', in_conj = in_conj}) sym_tab end | NONE => let val max_ary = (case unprefix_and_unascii const_prefix s of SOME s => (if String.isSubstring uncurried_alias_sep s then ary else (case try (ary_of o robust_const_type thy o unmangled_invert_const) s of SOME ary0 => Int.min (ary0, ary) | NONE => ary)) | NONE => ary) val pred_sym = top_level andalso max_ary = ary andalso not bool_vars val min_ary = (case app_op_level of Min_App_Op => max_ary | Full_App_Op_And_Predicator => 0 | _ => fold (consider_var_ary T) fun_var_Ts max_ary) in Symtab.update_new (s, {pred_sym = pred_sym, min_ary = min_ary, max_ary = max_ary, types = [T], in_conj = conj_fact}) sym_tab end)) end | IVar (_, T) => add_universal_var T accum | IAbs ((_, T), tm) => accum |> add_universal_var T |> add_iterm_syms false tm | _ => accum) |> fold (add_iterm_syms false) args end in add_iterm_syms end fun sym_table_of_facts ctxt type_enc app_op_level conjs facts = let fun add_iterm_syms conj_fact = add_iterm_syms_to_sym_table ctxt app_op_level conj_fact true fun add_fact_syms conj_fact = ifact_lift (formula_fold NONE (K (add_iterm_syms conj_fact))) in ((false, []), Symtab.empty) |> fold (add_fact_syms true) conjs |> fold (add_fact_syms 
false) facts ||> fold Symtab.update (default_sym_tab_entries type_enc) end fun min_ary_of sym_tab s = (case Symtab.lookup sym_tab s of SOME ({min_ary, ...} : sym_info) => min_ary | NONE => (case unprefix_and_unascii const_prefix s of SOME s => let val s = s |> unmangled_invert_const in if s = predicator_name then 1 else if s = app_op_name then 2 else if s = type_guard_name then 1 else 0 end | NONE => 0)) (* True if the constant ever appears outside of the top-level position in literals, or if it appears with different arities (e.g., because of different type instantiations). If false, the constant always receives all of its arguments and is used as a predicate. *) fun is_pred_sym sym_tab s = (case Symtab.lookup sym_tab s of SOME ({pred_sym, min_ary, max_ary, ...} : sym_info) => pred_sym andalso min_ary = max_ary | NONE => false) val fTrue_iconst = IConst ((const_prefix ^ "fTrue", \<^const_name>\fTrue\), \<^typ>\bool\, []) val predicator_iconst = IConst (`(make_fixed_const NONE) predicator_name, \<^typ>\bool => bool\, []) fun predicatify completish tm = if completish > 1 then IApp (IApp (IConst (`I tptp_equal, \<^typ>\bool => bool => bool\, []), tm), fTrue_iconst) else IApp (predicator_iconst, tm) val app_op = `(make_fixed_const NONE) app_op_name fun list_app head args = fold (curry (IApp o swap)) args head fun mk_app_op type_enc head arg = let val head_T = ityp_of head val (arg_T, res_T) = dest_funT head_T val app = IConst (app_op, head_T --> head_T, [arg_T, res_T]) |> mangle_type_args_in_iterm type_enc in list_app app [head, arg] end fun firstorderize_fact thy ctrss type_enc uncurried_aliases completish sym_tab = let fun do_app arg head = mk_app_op type_enc head arg fun list_app_ops (head, args) = fold do_app args head fun introduce_app_ops tm = let val (head, args) = tm |> strip_iterm_comb ||> map introduce_app_ops in (case head of IConst (name as (s, _), T, T_args) => let val min_ary = min_ary_of sym_tab s val ary = if uncurried_aliases andalso String.isPrefix const_prefix s then let val ary = length args (* In polymorphic native type encodings, it is impossible to declare a fully polymorphic symbol that takes more arguments than its signature (even though such concrete instances, where a type variable is instantiated by a function type, are possible). *) val official_ary = if is_type_enc_polymorphic type_enc then (case unprefix_and_unascii const_prefix s of SOME s' => (case try (ary_of o robust_const_type thy) (invert_const s') of SOME ary => ary | NONE => min_ary) | NONE => min_ary) else 1000000000 (* unrealistically big arity *) in Int.min (ary, official_ary) end else min_ary val head = if ary = min_ary then head else IConst (aliased_uncurried ary name, T, T_args) in args |> chop ary |>> list_app head |> list_app_ops end | IAbs ((name, T), tm) => list_app_ops (IAbs ((name, T), introduce_app_ops tm), args) | _ => list_app_ops (head, args)) end fun introduce_predicators tm = (case strip_iterm_comb tm of (IConst ((s, _), _, _), _) => if is_pred_sym sym_tab s then tm else predicatify completish tm | _ => predicatify completish tm) val is_ho = is_type_enc_higher_order type_enc val is_full_ho = is_type_enc_full_higher_order type_enc val is_fool = is_type_enc_fool type_enc val do_iterm = (not is_ho ? introduce_app_ops) #> (not (is_full_ho orelse is_fool) ?
introduce_predicators) #> filter_type_args_in_iterm thy ctrss type_enc in update_iformula (formula_map do_iterm) end (** Helper facts **) val not_ffalse = @{lemma "\ fFalse" by (unfold fFalse_def) fast} val ftrue = @{lemma "fTrue" by (unfold fTrue_def) fast} (* The Boolean indicates that a fairly sound type encoding is needed. *) fun helper_table with_combs = (if with_combs then [(("COMBI", false), [(Non_Rec_Def, @{thm Meson.COMBI_def})]), (("COMBK", false), [(Non_Rec_Def, @{thm Meson.COMBK_def})]), (("COMBB", false), [(Non_Rec_Def, @{thm Meson.COMBB_def})]), (("COMBC", false), [(Non_Rec_Def, @{thm Meson.COMBC_def})]), (("COMBS", false), [(Non_Rec_Def, @{thm Meson.COMBS_def})])] else []) @ [((predicator_name, false), [(General, not_ffalse), (General, ftrue)]), (("fFalse", false), [(General, not_ffalse)]), (("fFalse", true), [(General, @{thm True_or_False})]), (("fTrue", false), [(General, ftrue)]), (("fTrue", true), [(General, @{thm True_or_False})]), (("If", true), [(Non_Rec_Def, @{thm if_True}), (Non_Rec_Def, @{thm if_False}), (General, @{thm True_or_False})]), (("fNot", false), @{thms fNot_def [THEN Meson.iff_to_disjD, THEN conjunct1] fNot_def [THEN Meson.iff_to_disjD, THEN conjunct2]} |> map (pair Non_Rec_Def)), (("fconj", false), @{lemma "\ P \ \ Q \ fconj P Q" "\ fconj P Q \ P" "\ fconj P Q \ Q" by (unfold fconj_def) fast+} |> map (pair General)), (("fdisj", false), @{lemma "\ P \ fdisj P Q" "\ Q \ fdisj P Q" "\ fdisj P Q \ P \ Q" by (unfold fdisj_def) fast+} |> map (pair General)), (("fimplies", false), @{lemma "P \ fimplies P Q" "\ Q \ fimplies P Q" "\ fimplies P Q \ \ P \ Q" by (unfold fimplies_def) fast+} |> map (pair General)), (("fequal", true), (* This is a lie: Higher-order equality doesn't need a sound type encoding. However, this is done for backward compatibility: Including the equality helpers by default in Metis breaks a few existing proofs. *) @{thms fequal_def [THEN Meson.iff_to_disjD, THEN conjunct1] fequal_def [THEN Meson.iff_to_disjD, THEN conjunct2]} |> map (pair General)), (* Partial characterization of "fAll" and "fEx". A complete characterization would require the axiom of choice for replay with Metis. *) (("fAll", false), [(General, @{lemma "\ fAll P \ P x" by (auto simp: fAll_def)})]), - (("fEx", false), [(General, @{lemma "\ P x \ fEx P" by (auto simp: fEx_def)})])] + (("fEx", false), [(General, @{lemma "\ P x \ fEx P" by (auto simp: fEx_def)})]), + (("fChoice", true), [(General, @{thm fChoice_iff_Ex})])] |> map (apsnd (map (apsnd zero_var_indexes))) +val () = + let + fun is_skolemizable \<^Const_>\Ex _ for \Abs _\\ = true + | is_skolemizable _ = false + + fun check_no_skolemizable_thm thm = + if Term.exists_subterm is_skolemizable (Thm.full_prop_of thm) then + error "Theorems of the helper table cannot contain skolemizable terms because they don't \ + \get skolemized in metis." + else + () + in + helper_table true + |> List.app (fn (_, thms) => List.app (check_no_skolemizable_thm o snd) thms) + end + fun completish_helper_table with_combs = helper_table with_combs @ [((predicator_name, true), @{thms True_or_False fTrue_ne_fFalse} |> map (pair General)), ((app_op_name, true), [(General, @{lemma "\x. \ f x = g x \ f = g" by blast}), (General, @{lemma "\p.
(p x \ p y) \ x = y" by blast})]), (("fconj", false), @{thms fconj_table fconj_laws fdisj_laws} |> map (pair Non_Rec_Def)), (("fdisj", false), @{thms fdisj_table fconj_laws fdisj_laws} |> map (pair Non_Rec_Def)), (("fimplies", false), @{thms fimplies_table fconj_laws fdisj_laws fimplies_laws} |> map (pair Non_Rec_Def)), (("fequal", false), (@{thms fequal_table} |> map (pair Non_Rec_Def)) @ (@{thms fequal_laws} |> map (pair General))), (("fAll", false), @{thms fAll_table fComp_law fAll_law fEx_law} |> map (pair Non_Rec_Def)), (("fEx", false), @{thms fEx_table fComp_law fAll_law fEx_law} |> map (pair Non_Rec_Def))] |> map (apsnd (map (apsnd zero_var_indexes))) fun bound_tvars type_enc sorts Ts = (case filter is_TVar Ts of [] => I | Ts => ((sorts andalso polymorphism_of_type_enc type_enc <> Type_Class_Polymorphic) ? mk_ahorn (Ts |> class_membs_of_types type_enc add_sorts_on_tvar |> map (class_atom type_enc))) #> (case type_enc of - Native (_, _, _, Type_Class_Polymorphic, _) => + Native (_, _, Type_Class_Polymorphic, _) => mk_atyquant AForall (map (fn TVar (z as (_, S)) => (AType ((tvar_name z, []), []), map (`make_class) (normalize_classes S) )) Ts) - | Native (_, _, _, Raw_Polymorphic _, _) => + | Native (_, _, Raw_Polymorphic _, _) => mk_atyquant AForall (map (fn TVar (z as _) => (AType ((tvar_name z, []), []), [])) Ts) | _ => mk_aquant AForall (map (fn TVar z => (tvar_name z, NONE)) Ts))) fun eq_formula type_enc atomic_Ts bounds pred_sym tm1 tm2 = (if pred_sym then AConn (AIff, [AAtom tm1, AAtom tm2]) else AAtom (ATerm ((`I tptp_equal, []), [tm1, tm2]))) |> mk_aquant AForall bounds |> close_formula_universally |> bound_tvars type_enc true atomic_Ts val helper_rank = default_rank val min_rank = 9 * helper_rank div 10 val max_rank = 4 * min_rank fun rank_of_fact_num n j = min_rank + (max_rank - min_rank) * j div n val type_tag = `(make_fixed_const NONE) type_tag_name fun could_specialize_helpers type_enc = not (is_type_enc_polymorphic type_enc) andalso level_of_type_enc type_enc <> No_Types fun should_specialize_helper type_enc t = could_specialize_helpers type_enc andalso not (null (Term.hidden_polymorphism t)) fun add_helper_facts_of_sym ctxt format type_enc lam_trans completish (s, {types, ...} : sym_info) = (case unprefix_and_unascii const_prefix s of SOME mangled_s => let val thy = Proof_Context.theory_of ctxt val unmangled_s = mangled_s |> unmangled_const_name |> hd fun dub needs_sound j k = ascii_of unmangled_s ^ "_" ^ string_of_int j ^ "_" ^ string_of_int k ^ (if mangled_s = unmangled_s then "" else "_" ^ ascii_of mangled_s) ^ (if needs_sound then typed_helper_suffix else untyped_helper_suffix) fun specialize_helper t T = if unmangled_s = app_op_name then let val tyenv = Sign.typ_match thy (alpha_to_beta, domain_type T) Vartab.empty in Envir.subst_term_types tyenv t end else specialize_type thy (invert_const unmangled_s, T) t fun dub_and_inst needs_sound (j, (status, t)) = (if should_specialize_helper type_enc t then map_filter (try (specialize_helper t)) types else [t]) |> tag_list 1 |> map (fn (k, t) => ((dub needs_sound j k, (Global, status)), t)) fun make_facts type_enc = map_filter (make_fact ctxt format type_enc false) val sound = is_type_enc_sound type_enc val could_specialize = could_specialize_helpers type_enc val with_combs = lam_trans <> opaque_combsN in fold (fn ((helper_s, needs_sound), ths) => let - fun map_syntax_of_type_enc f (Native (order, fool, syntax, polymorphism, type_level)) = - Native (order, fool, f syntax, polymorphism, type_level) - | 
map_syntax_of_type_enc _ type_enc = type_enc - val remove_ite_syntax = map_syntax_of_type_enc (K {with_ite = false}) + fun map_syntax f (Native (order, With_FOOL syntax, polymorphism, type_level)) = + Native (order, With_FOOL (f syntax), polymorphism, type_level) + | map_syntax _ type_enc = type_enc + val remove_ite_syntax = map_syntax + (fn {with_let, ...} => {with_ite = false, with_let = with_let}) in if (needs_sound andalso not sound) orelse (helper_s <> unmangled_s andalso (completish < 3 orelse could_specialize)) then I else ths |> map_index (apfst (curry op+ 1)) |> maps (dub_and_inst needs_sound o apsnd (apsnd Thm.prop_of)) |> make_facts ((helper_s = "If" ? remove_ite_syntax) type_enc) |> union (op = o apply2 #iformula) end) ((if completish >= 3 then completish_helper_table else helper_table) with_combs) end | NONE => I) fun helper_facts_of_sym_table ctxt format type_enc lam_trans completish sym_tab = Symtab.fold_rev (add_helper_facts_of_sym ctxt format type_enc lam_trans completish) sym_tab [] (***************************************************************) (* Type Classes Present in the Axiom or Conjecture Clauses *) (***************************************************************) fun set_insert (x, s) = Symtab.update (x, ()) s fun add_classes (cls, cset) = List.foldl set_insert cset (flat cls) fun classes_of_terms get_Ts = map (map snd o get_Ts) #> List.foldl add_classes Symtab.empty #> Symtab.delete_safe class_of_types #> Symtab.keys val tfree_classes_of_terms = classes_of_terms Misc_Legacy.term_tfrees val tvar_classes_of_terms = classes_of_terms Misc_Legacy.term_tvars fun fold_type_ctrs f (Type (s, Ts)) x = fold (fold_type_ctrs f) Ts (f (s, x)) | fold_type_ctrs _ _ x = x (* Type constructors used to instantiate overloaded constants are the only ones needed. *) fun add_type_ctrs_in_term thy = let fun add (Const (\<^const_name>\Meson.skolem\, _) $ _) = I | add (t $ u) = add t #> add u | add (Const x) = x |> robust_const_type_args thy |> fold (fold_type_ctrs set_insert) | add (Abs (_, _, u)) = add u | add _ = I in add end fun type_ctrs_of_terms thy ts = Symtab.keys (fold (add_type_ctrs_in_term thy) ts Symtab.empty) fun trans_lams_of_string ctxt type_enc lam_trans = if lam_trans = no_lamsN then rpair [] else if lam_trans = opaque_liftingN then lift_lams ctxt type_enc ##> K [] else if lam_trans = liftingN then lift_lams ctxt type_enc else if lam_trans = opaque_combsN orelse lam_trans = combsN then map (introduce_combinators ctxt type_enc) #> rpair [] else if lam_trans = combs_and_liftingN then lift_lams_part_1 ctxt type_enc ##> maps (fn t => [t, introduce_combinators ctxt type_enc (intentionalize_def t)]) #> lift_lams_part_2 ctxt type_enc else if lam_trans = combs_or_liftingN then lift_lams_part_1 ctxt type_enc ##> map (fn t => (case head_of (strip_qnt_body \<^const_name>\All\ t) of \<^term>\(=) ::bool => bool => bool\ => t | _ => introduce_combinators ctxt type_enc (intentionalize_def t))) #> lift_lams_part_2 ctxt type_enc else if lam_trans = keep_lamsN then map (Envir.eta_contract) #> rpair [] else error ("Unknown lambda translation scheme: " ^ quote lam_trans) val pull_and_reorder_definitions = let fun add_consts (IApp (t, u)) = fold add_consts [t, u] | add_consts (IAbs (_, t)) = add_consts t | add_consts (IConst (name, _, _)) = insert (op =) name | add_consts (IVar _) = I fun consts_of_hs l_or_r ({iformula, ...} : ifact) = (case iformula of AAtom (IApp (IApp (IConst _, t), u)) => add_consts (l_or_r (t, u)) [] | _ => []) (* Quadratic, but usually OK. 
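For example, given the definitions "f = g + 1" followed by "g = 2", the right-hand side of the first mentions the constant defined by the second, so "f" is skipped until "g" has been emitted, giving the order "g", "f".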
*) fun reorder [] [] = [] | reorder (fact :: skipped) [] = fact :: reorder [] skipped (* break cycle *) | reorder skipped (fact :: facts) = let val rhs_consts = consts_of_hs snd fact in if exists (exists (exists (member (op =) rhs_consts) o consts_of_hs fst)) [skipped, facts] then reorder (fact :: skipped) facts else fact :: reorder [] (facts @ skipped) end in List.partition (curry (op =) Definition o #role) #>> reorder [] #> op @ end fun s_not_prop \<^Const_>\Trueprop for t\ = \<^Const>\Trueprop for \s_not t\\ | s_not_prop \<^Const_>\Pure.imp for t \<^prop>\False\\ = t | s_not_prop t = \<^Const>\Pure.imp for t \<^prop>\False\\ fun translate_formulas simp_options ctxt prem_role format type_enc lam_trans presimp hyp_ts concl_t facts = let val thy = Proof_Context.theory_of ctxt val trans_lams = trans_lams_of_string ctxt type_enc lam_trans val fact_ts = facts |> map snd (* Remove existing facts from the conjecture, as this can dramatically boost an ATP's performance (for some reason). *) val hyp_ts = hyp_ts |> map (fn t => if member (op aconv) fact_ts t then \<^prop>\True\ else t) val hyp_ts = map freeze_term hyp_ts; val concl_t = freeze_term concl_t; val facts = facts |> map (apsnd (pair Axiom)) val conjs = map (pair prem_role) hyp_ts @ [(Conjecture, s_not_prop concl_t)] |> map2 (pair o rpair (Local, General) o string_of_int) (0 upto length hyp_ts) val ((conjs, facts), lam_facts) = (conjs, facts) |> presimp ? apply2 (map (apsnd (apsnd (presimp_prop simp_options ctxt type_enc)))) |> (if lam_trans = no_lamsN then rpair [] else op @ #> preprocess_abstractions_in_terms trans_lams #>> chop (length conjs)) val conjs = conjs |> make_conjecture ctxt format type_enc |> pull_and_reorder_definitions val facts = facts |> map_filter (fn (name, (_, t)) => make_fact ctxt format type_enc true (name, t)) |> pull_and_reorder_definitions val lifted = lam_facts |> map (extract_lambda_def dest_Const o snd o snd) val lam_facts = lam_facts |> map_filter (make_fact ctxt format type_enc true o apsnd snd) val all_ts = concl_t :: hyp_ts @ fact_ts val subs = tfree_classes_of_terms all_ts val supers = tvar_classes_of_terms all_ts val tycons = type_ctrs_of_terms thy all_ts val (supers, tcon_clauses) = if level_of_type_enc type_enc = No_Types then ([], []) else make_tcon_clauses thy tycons supers val subclass_pairs = make_subclass_pairs thy subs supers in (union (op =) subs supers, conjs, facts @ lam_facts, subclass_pairs, tcon_clauses, lifted) end val type_guard = `(make_fixed_const NONE) type_guard_name fun type_guard_iterm type_enc T tm = IApp (IConst (type_guard, T --> \<^typ>\bool\, [T]) |> mangle_type_args_in_iterm type_enc, tm) fun is_var_positively_naked_in_term _ (SOME false) _ accum = accum | is_var_positively_naked_in_term name _ (ATerm (((s, _), _), tms)) accum = accum orelse (is_tptp_equal s andalso member (op =) tms (ATerm ((name, []), []))) | is_var_positively_naked_in_term _ _ _ _ = true fun is_var_undercover_in_term thy name pos tm accum = accum orelse let val var = ATerm ((name, []), []) fun is_undercover (ATerm (_, [])) = false | is_undercover (ATerm (((s, _), _), tms)) = let val ary = length tms val cover = type_arg_cover thy pos s ary in exists (fn (j, tm) => tm = var andalso member (op =) cover j) (0 upto ary - 1 ~~ tms) orelse exists is_undercover tms end | is_undercover _ = true in is_undercover tm end fun should_guard_var_in_formula thy level pos phi (SOME true) name = (case level of All_Types => true | Undercover_Types => formula_fold pos (is_var_undercover_in_term thy name) phi false | 
Nonmono_Types (_, Uniform) => true | Nonmono_Types (_, Non_Uniform) => formula_fold pos (is_var_positively_naked_in_term name) phi false | _ => false) | should_guard_var_in_formula _ _ _ _ _ _ = true fun always_guard_var_in_formula _ _ _ _ _ _ = true fun should_generate_tag_bound_decl _ _ _ (SOME true) _ = false | should_generate_tag_bound_decl ctxt mono (Tags (_, level)) _ T = not (is_type_level_uniform level) andalso should_encode_type ctxt mono level T | should_generate_tag_bound_decl _ _ _ _ _ = false fun mk_aterm type_enc name T_args args = let val (ty_args, tm_args) = process_type_args type_enc T_args in ATerm ((name, ty_args), tm_args @ args) end fun do_bound_type type_enc = (case type_enc of Native _ => native_atp_type_of_typ type_enc false 0 #> SOME | _ => K NONE) fun tag_with_type ctxt mono type_enc pos T tm = IConst (type_tag, T --> T, [T]) |> mangle_type_args_in_iterm type_enc |> atp_term_of_iterm ctxt mono type_enc pos |> (fn ATerm ((s, tys), tms) => ATerm ((s, tys), tms @ [tm]) | _ => raise Fail "unexpected lambda-abstraction") and atp_term_of_iterm ctxt mono type_enc pos = let fun term site u = let val (head, args) = strip_iterm_comb u val pos = (case site of Top_Level pos => pos | Eq_Arg pos => pos | _ => NONE) val T = ityp_of u val t = (case head of IConst (name as (s, _), _, T_args) => let val ary = length args fun arg_site j = if is_tptp_equal s then Eq_Arg pos else Arg (s, j, ary) in map2 (fn j => term (arg_site j)) (0 upto ary - 1) args |> mk_aterm type_enc name T_args end | IVar (name, _) => map (term Elsewhere) args |> mk_aterm type_enc name [] | IAbs ((name, T), tm) => if is_type_enc_higher_order type_enc orelse is_type_enc_fool type_enc then AAbs (((name, native_atp_type_of_typ type_enc false 0 T), term Elsewhere tm), map (term Elsewhere) args) else raise Fail "unexpected lambda-abstraction" | IApp _ => raise Fail "impossible \"IApp\"") val tag = should_tag_with_type ctxt mono type_enc site u T in t |> tag ? tag_with_type ctxt mono type_enc pos T end in term (Top_Level pos) end and formula_of_iformula ctxt mono type_enc should_guard_var = let val thy = Proof_Context.theory_of ctxt val level = level_of_type_enc type_enc val do_term = atp_term_of_iterm ctxt mono type_enc fun do_out_of_bound_type pos phi universal (name, T) = if should_guard_type ctxt mono type_enc (fn () => should_guard_var thy level pos phi universal name) T then IVar (name, T) |> type_guard_iterm type_enc T |> do_term pos |> AAtom |> SOME else if should_generate_tag_bound_decl ctxt mono type_enc universal T then let val var = ATerm ((name, []), []) val tagged_var = tag_with_type ctxt mono type_enc pos T var in SOME (AAtom (ATerm ((`I tptp_equal, []), [tagged_var, var]))) end else NONE fun do_formula pos (ATyQuant (q, xs, phi)) = ATyQuant (q, map (apfst (native_atp_type_of_typ type_enc false 0)) xs, do_formula pos phi) | do_formula pos (AQuant (q, xs, phi)) = let val phi = phi |> do_formula pos val universal = Option.map (q = AExists ? 
not) pos in AQuant (q, xs |> map (apsnd (fn NONE => NONE | SOME T => do_bound_type type_enc T)), (if q = AForall then mk_ahorn else fold_rev (mk_aconn AAnd)) (map_filter (fn (_, NONE) => NONE | (s, SOME T) => do_out_of_bound_type pos phi universal (s, T)) xs) phi) end | do_formula pos (AConn conn) = aconn_map pos do_formula conn | do_formula pos (AAtom tm) = AAtom (do_term pos tm) in do_formula end fun string_of_status General = "" | string_of_status Induction = inductionN | string_of_status Intro = introN | string_of_status Inductive = inductiveN | string_of_status Elim = elimN | string_of_status Simp = simpN | string_of_status Non_Rec_Def = non_rec_defN | string_of_status Rec_Def = rec_defN (* Each fact is given a unique fact number to avoid name clashes (e.g., because of monomorphization). The TPTP forbids name clashes, and some of the remote provers might care. *) fun line_of_fact ctxt generate_info prefix encode alt freshen pos mono type_enc rank (j, {name, stature = (_, status), role, iformula, atomic_types}) = Formula ((prefix ^ (if freshen then string_of_int j ^ "_" else "") ^ encode name, alt name), role, iformula |> formula_of_iformula ctxt mono type_enc should_guard_var_in_formula (if pos then SOME true else NONE) |> close_formula_universally |> bound_tvars type_enc true atomic_types, NONE, isabelle_info generate_info (string_of_status status) (rank j)) fun lines_of_subclass generate_info type_enc sub super = Formula ((subclass_prefix ^ ascii_of sub ^ "___" ^ ascii_of super, ""), Axiom, AConn (AImplies, [sub, super] |> map (fn s => class_atom type_enc (s, tvar_a))) |> bound_tvars type_enc false [tvar_a], NONE, isabelle_info generate_info inductiveN helper_rank) fun lines_of_subclass_pair generate_info type_enc (sub, supers) = if polymorphism_of_type_enc type_enc = Type_Class_Polymorphic then [Class_Decl (class_decl_prefix ^ ascii_of sub, `make_class sub, map (`make_class) supers)] else map (lines_of_subclass generate_info type_enc sub) supers fun line_of_tcon_clause generate_info type_enc (name, prems, (cl, T)) = if polymorphism_of_type_enc type_enc = Type_Class_Polymorphic then Class_Memb (class_memb_prefix ^ name, map (fn (cls, T) => (T |> dest_TVar |> tvar_name, map (`make_class) cls)) prems, native_atp_type_of_typ type_enc false 0 T, `make_class cl) else Formula ((tcon_clause_prefix ^ name, ""), Axiom, mk_ahorn (maps (class_atoms type_enc) prems) (class_atom type_enc (cl, T)) |> bound_tvars type_enc true (snd (dest_Type T)), NONE, isabelle_info generate_info inductiveN helper_rank) fun line_of_conjecture ctxt mono type_enc ({name, role, iformula, atomic_types, ...} : ifact) = Formula ((conjecture_prefix ^ name, ""), role, iformula |> formula_of_iformula ctxt mono type_enc should_guard_var_in_formula (SOME false) |> close_formula_universally |> bound_tvars type_enc true atomic_types, NONE, []) fun lines_of_free_types type_enc (facts : ifact list) = if is_type_enc_polymorphic type_enc then let val type_classes = (polymorphism_of_type_enc type_enc = Type_Class_Polymorphic) fun line j (cl, T) = if type_classes then Class_Memb (class_memb_prefix ^ string_of_int j, [], native_atp_type_of_typ type_enc false 0 T, `make_class cl) else Formula ((tfree_clause_prefix ^ string_of_int j, ""), Hypothesis, class_atom type_enc (cl, T), NONE, []) val membs = fold (union (op =)) (map #atomic_types facts) [] |> class_membs_of_types type_enc add_sorts_on_tfree in map2 line (0 upto length membs - 1) membs end else [] (** Symbol declarations **) fun decl_line_of_class phantoms s = let val name as 
(s, _) = `make_class s in Sym_Decl (sym_decl_prefix ^ s, name, APi ([tvar_a_name], if phantoms = Without_Phantom_Type_Vars then AFun (a_itself_atype, bool_atype) else bool_atype)) end fun decl_lines_of_classes type_enc = (case type_enc of - Native (_, _, _, Raw_Polymorphic phantoms, _) => map (decl_line_of_class phantoms) + Native (_, _, Raw_Polymorphic phantoms, _) => map (decl_line_of_class phantoms) | _ => K []) fun sym_decl_table_of_facts thy type_enc sym_tab (conjs, facts, extra_tms) = let fun add_iterm_syms tm = let val (head, args) = strip_iterm_comb tm in (case head of IConst ((s, s'), T, T_args) => let val (pred_sym, in_conj) = (case Symtab.lookup sym_tab s of SOME ({pred_sym, in_conj, ...} : sym_info) => (pred_sym, in_conj) | NONE => (false, false)) val decl_sym = (case type_enc of Guards _ => not pred_sym | _ => true) andalso not (String.isPrefix let_bound_var_prefix s) andalso is_tptp_user_symbol s in if decl_sym then Symtab.map_default (s, []) (insert_type thy #3 (s', T_args, T, pred_sym, length args, in_conj)) else I end | IAbs (_, tm) => add_iterm_syms tm | _ => I) #> fold add_iterm_syms args end val add_fact_syms = ifact_lift (formula_fold NONE (K add_iterm_syms)) fun add_formula_var_types (ATyQuant (_, _, phi)) = add_formula_var_types phi | add_formula_var_types (AQuant (_, xs, phi)) = fold (fn (_, SOME T) => insert_type thy I T | _ => I) xs #> add_formula_var_types phi | add_formula_var_types (AConn (_, phis)) = fold add_formula_var_types phis | add_formula_var_types _ = I fun var_types () = if is_type_enc_polymorphic type_enc then [tvar_a] else fold (ifact_lift add_formula_var_types) (conjs @ facts) [] fun add_undefined_const T = let (* FIXME: make sure type arguments are filtered / clean up code *) val (s, s') = `(make_fixed_const NONE) \<^const_name>\undefined\ |> (is_type_enc_mangling type_enc ? mangled_const_name type_enc [T]) in Symtab.map_default (s, []) (insert_type thy #3 (s', [T], T, false, 0, false)) end fun add_TYPE_const () = let val (s, s') = TYPE_name in Symtab.map_default (s, []) (insert_type thy #3 (s', [tvar_a], \<^typ>\'a itself\, false, 0, false)) end in Symtab.empty |> is_type_enc_sound type_enc ? (fold (fold add_fact_syms) [conjs, facts] #> fold add_iterm_syms extra_tms #> (case type_enc of - Native (_, _, _, Raw_Polymorphic phantoms, _) => + Native (_, _, Raw_Polymorphic phantoms, _) => phantoms = Without_Phantom_Type_Vars ? add_TYPE_const () | Native _ => I | _ => fold add_undefined_const (var_types ()))) end (* We add "bool" in case the helper "True_or_False" is included later. *) fun default_mono level completish = {maybe_finite_Ts = [\<^typ>\bool\], surely_infinite_Ts = (case level of Nonmono_Types (Strict, _) => [] | _ => known_infinite_types), maybe_nonmono_Ts = [if completish >= 3 then tvar_a else \<^typ>\bool\]} (* This inference is described in section 4 of Blanchette et al., "Encoding monomorphic and polymorphic types", TACAS 2013. 
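Roughly: only an equality one of whose arguments is a universally quantified variable can make a type nonmonotonic, so only such occurrences are inspected. Such an equality at a possibly finite type (say, a small enumeration datatype) puts that type into "maybe_finite_Ts" and "maybe_nonmono_Ts"; the same equality at "nat" changes nothing, since "nat" is among the known infinite types.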
*) fun add_iterm_mononotonicity_info ctxt level polarity tm (mono as {maybe_finite_Ts, surely_infinite_Ts, maybe_nonmono_Ts}) = let val thy = Proof_Context.theory_of ctxt fun update_mono T mono = (case level of Nonmono_Types (strictness, _) => if exists (type_instance thy T) surely_infinite_Ts orelse member (type_equiv thy) maybe_finite_Ts T then mono else if is_type_kind_of_surely_infinite ctxt strictness surely_infinite_Ts T then {maybe_finite_Ts = maybe_finite_Ts, surely_infinite_Ts = surely_infinite_Ts |> insert_type thy I T, maybe_nonmono_Ts = maybe_nonmono_Ts} else {maybe_finite_Ts = maybe_finite_Ts |> insert (type_equiv thy) T, surely_infinite_Ts = surely_infinite_Ts, maybe_nonmono_Ts = maybe_nonmono_Ts |> insert_type thy I T} | _ => mono) fun update_mono_rec (IConst ((_, s'), Type (_, [T, _]), _)) = if String.isPrefix \<^const_name>\fequal\ s' then update_mono T else I | update_mono_rec (IApp (tm1, tm2)) = fold update_mono_rec [tm1, tm2] | update_mono_rec (IAbs (_, tm)) = update_mono_rec tm | update_mono_rec _ = I in mono |> (case tm of IApp (IApp (IConst ((s, _), Type (_, [T, _]), _), tm1), tm2) => ((polarity <> SOME false andalso is_tptp_equal s andalso exists is_maybe_universal_var [tm1, tm2]) ? update_mono T) #> fold update_mono_rec [tm1, tm2] | _ => update_mono_rec tm) end fun add_fact_mononotonicity_info ctxt level ({role, iformula, ...} : ifact) = formula_fold (SOME (role <> Conjecture)) (add_iterm_mononotonicity_info ctxt level) iformula fun mononotonicity_info_of_facts ctxt type_enc completish facts = let val level = level_of_type_enc type_enc in default_mono level completish |> is_type_level_monotonicity_based level ? fold (add_fact_mononotonicity_info ctxt level) facts end fun fold_arg_types f (IApp (tm1, tm2)) = fold_arg_types f tm1 #> fold_term_types f tm2 | fold_arg_types _ _ = I and fold_term_types f tm = f (ityp_of tm) #> fold_arg_types f tm fun add_iformula_monotonic_types ctxt mono type_enc = let val thy = Proof_Context.theory_of ctxt val level = level_of_type_enc type_enc val should_encode = should_encode_type ctxt mono level fun add_type T = not (should_encode T) ? insert_type thy I T in formula_fold NONE (K (fold_term_types add_type)) end fun add_fact_monotonic_types ctxt mono type_enc = ifact_lift (add_iformula_monotonic_types ctxt mono type_enc) fun monotonic_types_of_facts ctxt mono type_enc facts = let val level = level_of_type_enc type_enc in [] |> (is_type_enc_polymorphic type_enc andalso is_type_level_monotonicity_based level) ? 
fold (add_fact_monotonic_types ctxt mono type_enc) facts end fun line_of_guards_mono_type ctxt generate_info mono type_enc T = Formula ((guards_sym_formula_prefix ^ ascii_of (mangled_type type_enc T), ""), Axiom, IConst (`make_bound_var "X", T, []) |> type_guard_iterm type_enc T |> AAtom |> formula_of_iformula ctxt mono type_enc always_guard_var_in_formula (SOME true) |> close_formula_universally |> bound_tvars type_enc true (atomic_types_of T), NONE, isabelle_info generate_info inductiveN helper_rank) fun line_of_tags_mono_type ctxt generate_info mono type_enc T = let val x_var = ATerm ((`make_bound_var "X", []), []) in Formula ((tags_sym_formula_prefix ^ ascii_of (mangled_type type_enc T), ""), Axiom, eq_formula type_enc (atomic_types_of T) [] false (tag_with_type ctxt mono type_enc NONE T x_var) x_var, NONE, isabelle_info generate_info non_rec_defN helper_rank) end fun lines_of_mono_types ctxt generate_info mono type_enc = (case type_enc of Native _ => K [] | Guards _ => map (line_of_guards_mono_type ctxt generate_info mono type_enc) | Tags _ => map (line_of_tags_mono_type ctxt generate_info mono type_enc)) fun decl_line_of_sym ctxt type_enc s (s', T_args, T, pred_sym, ary, _) = let val thy = Proof_Context.theory_of ctxt val (T, T_args) = if null T_args then (T, []) else (case unprefix_and_unascii const_prefix s of SOME s' => let val s' = s' |> unmangled_invert_const val T = s' |> robust_const_type thy in (T, robust_const_type_args thy (s', T)) end | NONE => raise Fail "unexpected type arguments") in Sym_Decl (sym_decl_prefix ^ s, (s, s'), T |> native_atp_type_of_typ type_enc pred_sym ary |> not (null T_args) ? curry APi (map (tvar_name o dest_TVar) T_args)) end fun honor_conj_sym_role in_conj = (if in_conj then Hypothesis else Axiom, I) fun line_of_guards_sym_decl ctxt generate_info mono type_enc n s j (s', T_args, T, _, ary, in_conj) = let val thy = Proof_Context.theory_of ctxt val (role, maybe_negate) = honor_conj_sym_role in_conj val (arg_Ts, res_T) = chop_fun ary T val bound_names = 1 upto ary |> map (`I o make_bound_var o string_of_int) val bounds = bound_names ~~ arg_Ts |> map (fn (name, T) => IConst (name, T, [])) val bound_Ts = (case level_of_type_enc type_enc of All_Types => if null T_args then replicate ary NONE else map SOME arg_Ts | Undercover_Types => let val cover = type_arg_cover thy NONE s ary in map2 (fn j => if member (op =) cover j then SOME else K NONE) (0 upto ary - 1) arg_Ts end | _ => replicate ary NONE) in Formula ((guards_sym_formula_prefix ^ s ^ (if n > 1 then "_" ^ string_of_int j else ""), ""), role, IConst ((s, s'), T, T_args) |> fold (curry (IApp o swap)) bounds |> type_guard_iterm type_enc res_T |> AAtom |> mk_aquant AForall (bound_names ~~ bound_Ts) |> formula_of_iformula ctxt mono type_enc always_guard_var_in_formula (SOME true) |> close_formula_universally |> bound_tvars type_enc (n > 1) (atomic_types_of T) |> maybe_negate, NONE, isabelle_info generate_info inductiveN helper_rank) end fun lines_of_tags_sym_decl ctxt generate_info mono type_enc n s (j, (s', T_args, T, pred_sym, ary, in_conj)) = let val thy = Proof_Context.theory_of ctxt val level = level_of_type_enc type_enc val ident = tags_sym_formula_prefix ^ s ^ (if n > 1 then "_" ^ string_of_int j else "") val (role, maybe_negate) = honor_conj_sym_role in_conj val (arg_Ts, res_T) = chop_fun ary T val bound_names = 1 upto ary |> map (`I o make_bound_var o string_of_int) val bounds = bound_names |> map (fn name => ATerm ((name, []), [])) val cst = mk_aterm type_enc (s, s') T_args val eq = 
maybe_negate oo eq_formula type_enc (atomic_types_of T) [] pred_sym val tag_with = tag_with_type ctxt mono type_enc NONE fun formula c = [Formula ((ident, ""), role, eq (tag_with res_T c) c, NONE, isabelle_info generate_info non_rec_defN helper_rank)] in if pred_sym orelse not (should_encode_type ctxt mono level res_T) then [] else if level = Undercover_Types then let val cover = type_arg_cover thy NONE s ary fun maybe_tag (j, arg_T) = member (op =) cover j ? tag_with arg_T val bounds = bounds |> map2 maybe_tag (0 upto ary - 1 ~~ arg_Ts) in formula (cst bounds) end else formula (cst bounds) end fun result_type_of_decl (_, _, T, _, ary, _) = chop_fun ary T |> snd fun rationalize_decls thy (decls as decl :: (decls' as _ :: _)) = let val T = result_type_of_decl decl |> map_type_tvar (fn (z, _) => TVar (z, \<^sort>\type\)) in if forall (type_generalization thy T o result_type_of_decl) decls' then [decl] else decls end | rationalize_decls _ decls = decls fun lines_of_sym_decls ctxt generate_info mono type_enc (s, decls) = (case type_enc of Native _ => [decl_line_of_sym ctxt type_enc s (hd decls)] | Guards (_, level) => let val thy = Proof_Context.theory_of ctxt val decls = decls |> rationalize_decls thy val n = length decls val decls = decls |> filter (should_encode_type ctxt mono level o result_type_of_decl) in (0 upto length decls - 1, decls) |-> map2 (line_of_guards_sym_decl ctxt generate_info mono type_enc n s) end | Tags (_, level) => if is_type_level_uniform level then [] else let val n = length decls in (0 upto n - 1 ~~ decls) |> maps (lines_of_tags_sym_decl ctxt generate_info mono type_enc n s) end) fun lines_of_sym_decl_table ctxt generate_info mono type_enc mono_Ts sym_decl_tab = let val syms = sym_decl_tab |> Symtab.dest |> sort_by fst val mono_lines = lines_of_mono_types ctxt generate_info mono type_enc mono_Ts val decl_lines = maps (lines_of_sym_decls ctxt generate_info mono type_enc) syms in mono_lines @ decl_lines end fun datatypes_of_sym_table ctxt ctrss (DFG Polymorphic) (type_enc as Native _) uncurried_aliases sym_tab = if is_type_enc_polymorphic type_enc then let val thy = Proof_Context.theory_of ctxt fun do_ctr (s, T) = let val s' = make_fixed_const (SOME type_enc) s val ary = ary_of T fun mk name = SOME (mk_aterm type_enc name (robust_const_type_args thy (s, T)) []) in if T = HOLogic.boolT then (case proxify_const s' of SOME proxy_base => mk (proxy_base |>> prefix const_prefix) | NONE => NONE) else (case Symtab.lookup sym_tab s' of NONE => NONE | SOME ({min_ary, ...} : sym_info) => if ary = min_ary then mk (s', s) else if uncurried_aliases then mk (aliased_uncurried ary (s', s)) else NONE) end fun datatype_of_ctrs (ctrs as (_, T1) :: _) = let val ctrs' = map do_ctr ctrs in (native_atp_type_of_typ type_enc false 0 (body_type T1), map_filter I ctrs', forall is_some ctrs') end in ctrss |> map datatype_of_ctrs |> filter #3 end else [] | datatypes_of_sym_table _ _ _ _ _ _ = [] fun decl_line_of_datatype (ty as AType (((_, s'), _), ty_args), ctrs, exhaust) = let val xs = map (fn AType ((name, _), []) => name) ty_args in Datatype_Decl (datatype_decl_prefix ^ ascii_of s', map (rpair []) xs, ty, ctrs, exhaust) end fun pair_append (xs1, xs2) (ys1, ys2) = (xs1 @ ys1, xs2 @ ys2) fun do_uncurried_alias_lines_of_sym ctxt generate_info ctrss mono type_enc sym_tab0 sym_tab base_s0 types in_conj = let fun do_alias ary = let val thy = Proof_Context.theory_of ctxt val (role, maybe_negate) = honor_conj_sym_role in_conj val base_name = base_s0 |> `(make_fixed_const (SOME type_enc)) val T = (case 
types of [T] => T | _ => robust_const_type thy base_s0) val T_args = robust_const_type_args thy (base_s0, T) val (base_name as (base_s, _), T_args) = mangle_type_args_in_const type_enc base_name T_args val base_ary = min_ary_of sym_tab0 base_s fun do_const name = IConst (name, T, T_args) val filter_ty_args = filter_type_args_in_iterm thy ctrss type_enc val atp_term_of = atp_term_of_iterm ctxt mono type_enc (SOME true) val name1 as (s1, _) = base_name |> ary - 1 > base_ary ? aliased_uncurried (ary - 1) val name2 as (s2, _) = base_name |> aliased_uncurried ary val (arg_Ts, _) = chop_fun ary T val bound_names = 1 upto ary |> map (`I o make_bound_var o string_of_int) val bounds = bound_names ~~ arg_Ts val (first_bounds, last_bound) = bounds |> map (fn (name, T) => IConst (name, T, [])) |> split_last val tm1 = mk_app_op type_enc (list_app (do_const name1) first_bounds) last_bound |> filter_ty_args val tm2 = list_app (do_const name2) (first_bounds @ [last_bound]) |> filter_ty_args val eq = eq_formula type_enc (atomic_types_of T) (map (apsnd (do_bound_type type_enc)) bounds) false (atp_term_of tm1) (atp_term_of tm2) in ([tm1, tm2], [Formula ((uncurried_alias_eq_prefix ^ s2, ""), role, eq |> maybe_negate, NONE, isabelle_info generate_info non_rec_defN helper_rank)]) |> (if ary - 1 = base_ary orelse Symtab.defined sym_tab s1 then I else pair_append (do_alias (ary - 1))) end in do_alias end fun uncurried_alias_lines_of_sym ctxt generate_info ctrss mono type_enc sym_tab0 sym_tab (s, {min_ary, types, in_conj, ...} : sym_info) = (case unprefix_and_unascii const_prefix s of SOME mangled_s => if String.isSubstring uncurried_alias_sep mangled_s then let val base_s0 = mangled_s |> unmangled_invert_const in do_uncurried_alias_lines_of_sym ctxt generate_info ctrss mono type_enc sym_tab0 sym_tab base_s0 types in_conj min_ary end else ([], []) | NONE => ([], [])) fun uncurried_alias_lines_of_sym_table ctxt generate_info ctrss mono type_enc uncurried_aliases sym_tab0 sym_tab = ([], []) |> uncurried_aliases ? Symtab.fold_rev (pair_append o uncurried_alias_lines_of_sym ctxt generate_info ctrss mono type_enc sym_tab0 sym_tab) sym_tab val implicit_declsN = "Could-be-implicit typings" val explicit_declsN = "Explicit typings" val uncurried_alias_eqsN = "Uncurried aliases" val factsN = "Relevant facts" val subclassesN = "Subclasses" val tconsN = "Type constructors" val helpersN = "Helper facts" val conjsN = "Conjectures" val free_typesN = "Free types" (* TFF allows implicit declarations of types, function symbols, and predicate symbols (with "$i" as the type of individuals), but some provers (e.g., SNARK) require explicit declarations. The situation is similar for THF. 
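For example, such a prover expects a unary function symbol "f" to be declared explicitly, as in "tff(f_type, type, f: $i > $i).", before its first use, rather than having it silently default to individuals.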
*) fun default_type pred_sym = let fun typ 0 0 = if pred_sym then bool_atype else individual_atype | typ 0 tm_ary = AFun (individual_atype, typ 0 (tm_ary - 1)) | typ ty_ary tm_ary = APi (replicate ty_ary tvar_a_name, typ 0 tm_ary) in typ end fun undeclared_in_problem problem = let fun do_sym (name as (s, _)) value = if is_tptp_user_symbol s andalso not (String.isPrefix let_bound_var_prefix s) then Symtab.default (s, (name, value)) else I fun do_class name = apfst (apfst (do_sym name ())) val do_bound_tvars = fold do_class o snd fun do_type (AType ((name, _), tys)) = apfst (apsnd (do_sym name (length tys))) #> fold do_type tys | do_type (AFun (ty1, ty2)) = do_type ty1 #> do_type ty2 | do_type (APi (_, ty)) = do_type ty fun do_term pred_sym (ATerm ((name, tys), tms)) = apsnd (do_sym name (fn _ => default_type pred_sym (length tys) (length tms))) #> fold do_type tys #> fold (do_term false) tms | do_term _ (AAbs (((_, ty), tm), args)) = do_type ty #> do_term false tm #> fold (do_term false) args fun do_formula (ATyQuant (_, xs, phi)) = fold (do_type o fst) xs #> fold (fold do_class o snd) xs #> do_formula phi | do_formula (AQuant (_, xs, phi)) = fold do_type (map_filter snd xs) #> do_formula phi | do_formula (AConn (_, phis)) = fold do_formula phis | do_formula (AAtom tm) = do_term true tm fun do_line (Class_Decl (_, _, cls)) = fold do_class cls | do_line (Type_Decl _) = I | do_line (Sym_Decl (_, _, ty)) = do_type ty | do_line (Datatype_Decl (_, xs, ty, tms, _)) = fold do_bound_tvars xs #> do_type ty #> fold (do_term false) tms | do_line (Class_Memb (_, xs, ty, cl)) = fold do_bound_tvars xs #> do_type ty #> do_class cl | do_line (Formula (_, _, phi, _, _)) = do_formula phi val ((cls, tys), syms) = declared_in_atp_problem problem in ((Symtab.empty, Symtab.empty), Symtab.empty) |>> apfst (fold (fn (s, _) => Symtab.default (s, (("", ""), ()))) cls) |>> apsnd (fold (fn (s, _) => Symtab.default (s, (("", ""), 0))) tys) ||> fold (fn (s, _) => Symtab.default (s, (("", ""), K tvar_a_atype))) syms |> fold (fold do_line o snd) problem end fun declare_undeclared_in_problem heading problem = let val ((cls, tys), syms) = undeclared_in_problem problem val decls = Symtab.fold (fn (_, (("", ""), _)) => I (* already declared *) | (s, (cls, ())) => cons (Class_Decl (class_decl_prefix ^ s, cls, []))) cls [] @ Symtab.fold (fn (_, (("", ""), _)) => I (* already declared *) | (s, (ty, ary)) => cons (Type_Decl (type_decl_prefix ^ s, ty, ary))) tys [] @ Symtab.fold (fn (_, (("", ""), _)) => I (* already declared *) | (s, (sym, ty)) => cons (Sym_Decl (sym_decl_prefix ^ s, sym, ty ()))) syms [] in (heading, decls) :: problem end val all_ctrss_of_datatypes = map (map_filter (try dest_Const) o #ctrs) o Ctr_Sugar.ctr_sugars_of val app_op_and_predicator_threshold = 45 fun generate_atp_problem ctxt generate_info format prem_role type_enc mode lam_trans uncurried_aliases readable_names presimp hyp_ts concl_t facts = let val thy = Proof_Context.theory_of ctxt val type_enc = type_enc |> adjust_type_enc format val completish = (case mode of Sledgehammer_Completish k => k | _ => 0) (* Forcing explicit applications is expensive for polymorphic encodings, because it takes only one existential variable ranging over "'a => 'b" to ruin everything. Hence we do it only if there are few facts (which is normally the case for "metis" and the minimizer). 
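Concretely, with the threshold of 45 used below, a problem with 45 or more facts and hypotheses combined falls back on the leaner "Min_App_Op" (for polymorphic encodings) or "Sufficient_App_Op" scheme, whereas smaller problems can afford the full "Sufficient_App_Op_And_Predicator" treatment.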
*) val app_op_level = if completish > 0 then Full_App_Op_And_Predicator else if length facts + length hyp_ts >= app_op_and_predicator_threshold then if is_type_enc_polymorphic type_enc then Min_App_Op else Sufficient_App_Op else Sufficient_App_Op_And_Predicator val lam_trans = if lam_trans = keep_lamsN andalso not (is_type_enc_full_higher_order type_enc) then liftingN else lam_trans val simp_options = - let val simp = not (is_type_enc_fool type_enc) in - {if_simps = simp, let_simps = simp} - end + {if_simps = not (has_type_enc_ite type_enc), + let_simps = not (has_type_enc_let type_enc)} val (classes, conjs, facts, subclass_pairs, tcon_clauses, lifted) = translate_formulas simp_options ctxt prem_role format type_enc lam_trans presimp hyp_ts concl_t facts val (_, sym_tab0) = sym_table_of_facts ctxt type_enc app_op_level conjs facts val mono = conjs @ facts |> mononotonicity_info_of_facts ctxt type_enc completish val ctrss = all_ctrss_of_datatypes ctxt fun firstorderize in_helper = firstorderize_fact thy ctrss type_enc (uncurried_aliases andalso not in_helper) completish sym_tab0 val (conjs, facts) = (conjs, facts) |> apply2 (map (firstorderize false)) val (ho_stuff, sym_tab) = sym_table_of_facts ctxt type_enc Min_App_Op conjs facts val (uncurried_alias_eq_tms, uncurried_alias_eq_lines) = uncurried_alias_lines_of_sym_table ctxt generate_info ctrss mono type_enc uncurried_aliases sym_tab0 sym_tab val (_, sym_tab) = (ho_stuff, sym_tab) |> fold (add_iterm_syms_to_sym_table ctxt Min_App_Op false false) uncurried_alias_eq_tms val helpers = sym_tab |> helper_facts_of_sym_table ctxt format type_enc lam_trans completish |> map (firstorderize true) val all_facts = helpers @ conjs @ facts val mono_Ts = monotonic_types_of_facts ctxt mono type_enc all_facts val datatypes = datatypes_of_sym_table ctxt ctrss format type_enc uncurried_aliases sym_tab val class_decl_lines = decl_lines_of_classes type_enc classes val sym_decl_lines = (conjs, helpers @ facts, uncurried_alias_eq_tms) |> sym_decl_table_of_facts thy type_enc sym_tab |> lines_of_sym_decl_table ctxt generate_info mono type_enc mono_Ts val datatype_decl_lines = map decl_line_of_datatype datatypes val decl_lines = class_decl_lines @ sym_decl_lines @ datatype_decl_lines val num_facts = length facts val freshen = mode <> Exporter andalso mode <> Translator val pos = mode <> Exporter val rank_of = rank_of_fact_num num_facts val fact_lines = map (line_of_fact ctxt generate_info fact_prefix ascii_of I freshen pos mono type_enc rank_of) (0 upto num_facts - 1 ~~ facts) val subclass_lines = maps (lines_of_subclass_pair generate_info type_enc) subclass_pairs val tcon_lines = map (line_of_tcon_clause generate_info type_enc) tcon_clauses val helper_lines = 0 upto length helpers - 1 ~~ helpers |> map (line_of_fact ctxt generate_info helper_prefix I (K "") false true mono type_enc (K default_rank)) val free_type_lines = lines_of_free_types type_enc (facts @ conjs) val conj_lines = map (line_of_conjecture ctxt mono type_enc) conjs (* Reordering these might confuse the proof reconstruction code. 
*) val problem = [(explicit_declsN, decl_lines), (uncurried_alias_eqsN, uncurried_alias_eq_lines), (factsN, fact_lines), (subclassesN, subclass_lines), (tconsN, tcon_lines), (helpersN, helper_lines), (free_typesN, free_type_lines), (conjsN, conj_lines)] val problem = problem |> (case format of CNF => ensure_cnf_problem | CNF_UEQ => filter_cnf_ueq_problem | FOF => I | _ => declare_undeclared_in_problem implicit_declsN) val (problem, pool) = problem |> nice_atp_problem readable_names format fun add_sym_ary (s, {min_ary, ...} : sym_info) = min_ary > 0 ? Symtab.insert (op =) (s, min_ary) in (problem, Option.map snd pool |> the_default Symtab.empty, lifted, Symtab.fold add_sym_ary sym_tab Symtab.empty) end (* FUDGE *) val conj_weight = 0.0 val hyp_weight = 0.1 val fact_min_weight = 0.2 val fact_max_weight = 1.0 val type_info_default_weight = 0.8 (* Weights are from 0.0 (most important) to 1.0 (least important). *) fun atp_problem_selection_weights problem = let fun add_term_weights weight (ATerm ((s, _), tms)) = is_tptp_user_symbol s ? Symtab.default (s, weight) #> fold (add_term_weights weight) tms | add_term_weights weight (AAbs ((_, tm), args)) = add_term_weights weight tm #> fold (add_term_weights weight) args fun add_line_weights weight (Formula (_, _, phi, _, _)) = formula_fold NONE (K (add_term_weights weight)) phi | add_line_weights _ _ = I fun add_conjectures_weights [] = I | add_conjectures_weights conjs = let val (hyps, conj) = split_last conjs in add_line_weights conj_weight conj #> fold (add_line_weights hyp_weight) hyps end fun add_facts_weights facts = let val num_facts = length facts fun weight_of j = fact_min_weight + (fact_max_weight - fact_min_weight) * Real.fromInt j / Real.fromInt num_facts in map weight_of (0 upto num_facts - 1) ~~ facts |> fold (uncurry add_line_weights) end val get = these o AList.lookup (op =) problem in Symtab.empty |> add_conjectures_weights (get free_typesN @ get conjsN) |> add_facts_weights (get factsN) |> fold (fold (add_line_weights type_info_default_weight) o get) [explicit_declsN, subclassesN, tconsN] |> Symtab.dest |> sort (prod_ord Real.compare string_ord o apply2 swap) end (* Ugly hack: may make innocent victims (collateral damage) *) fun may_be_app s args = String.isPrefix app_op_name s andalso length args = 2 fun may_be_predicator s = member (op =) [predicator_name, prefixed_predicator_name] s fun strip_predicator (tm as ATerm ((s, _), [tm'])) = if may_be_predicator s then tm' else tm | strip_predicator tm = tm fun make_head_roll (ATerm ((s, _), tms)) = if may_be_app s tms then make_head_roll (hd tms) ||> append (tl tms) else (s, tms) | make_head_roll _ = ("", []) fun strip_up_to_predicator (ATyQuant (_, _, phi)) = strip_up_to_predicator phi | strip_up_to_predicator (AQuant (_, _, phi)) = strip_up_to_predicator phi | strip_up_to_predicator (AConn (_, phis)) = maps strip_up_to_predicator phis | strip_up_to_predicator (AAtom tm) = [strip_predicator tm] fun strip_ahorn_etc (ATyQuant (_, _, phi)) = strip_ahorn_etc phi | strip_ahorn_etc (AQuant (_, _, phi)) = strip_ahorn_etc phi | strip_ahorn_etc (AConn (AImplies, [phi1, phi2])) = strip_ahorn_etc phi2 |>> append (strip_up_to_predicator phi1) | strip_ahorn_etc phi = ([], hd (strip_up_to_predicator phi)) fun strip_iff_etc (ATyQuant (_, _, phi)) = strip_iff_etc phi | strip_iff_etc (AQuant (_, _, phi)) = strip_iff_etc phi | strip_iff_etc (AConn (AIff, [phi1, phi2])) = apply2 strip_up_to_predicator (phi1, phi2) | strip_iff_etc _ = ([], []) val max_term_order_weight = 2147483647 fun 
atp_problem_term_order_info problem = let fun add_edge s s' = Graph.default_node (s, ()) #> Graph.default_node (s', ()) #> Graph.add_edge_acyclic (s, s') fun add_term_deps head (ATerm ((s, _), args)) = if is_tptp_user_symbol head then (if is_tptp_user_symbol s then perhaps (try (add_edge s head)) else I) #> fold (add_term_deps head) args else I | add_term_deps head (AAbs ((_, tm), args)) = add_term_deps head tm #> fold (add_term_deps head) args fun add_intro_deps pred (Formula (_, role, phi, _, info)) = if pred (role, info) then let val (hyps, concl) = strip_ahorn_etc phi in (case make_head_roll concl of (head, args as _ :: _) => fold (add_term_deps head) (hyps @ args) | _ => I) end else I | add_intro_deps _ _ = I fun add_atom_eq_deps (SOME true) (ATerm ((s, _), [lhs as _, rhs])) = if is_tptp_equal s then (case make_head_roll lhs of (head, args as _ :: _) => fold (add_term_deps head) (rhs :: args) | _ => I) else I | add_atom_eq_deps _ _ = I fun add_eq_deps pred (Formula (_, role, phi, _, info)) = if pred (role, info) then (case strip_iff_etc phi of ([lhs], rhs) => (case make_head_roll lhs of (head, args as _ :: _) => fold (add_term_deps head) (rhs @ args) | _ => I) | _ => formula_fold (SOME (role <> Conjecture)) add_atom_eq_deps phi) else I | add_eq_deps _ _ = I fun has_status status (_, info) = extract_isabelle_status info = SOME status fun is_conj (role, _) = (role = Conjecture orelse role = Hypothesis) val graph = Graph.empty |> fold (fold (add_eq_deps (has_status non_rec_defN)) o snd) problem |> fold (fold (add_eq_deps (has_status rec_defN orf has_status simpN orf is_conj)) o snd) problem |> fold (fold (add_intro_deps (has_status inductiveN)) o snd) problem |> fold (fold (add_intro_deps (has_status introN)) o snd) problem fun next_weight w = if w + w <= max_term_order_weight then w + w else w + 1 fun add_weights _ [] = I | add_weights weight syms = fold (AList.update (op =) o rpair weight) syms #> add_weights (next_weight weight) (fold (union (op =) o Graph.immediate_succs graph) syms []) in (* Sorting is not just for aesthetics: It specifies the precedence order for the term ordering (KBO or LPO), from smaller to larger values. *) [] |> add_weights 1 (Graph.minimals graph) |> sort (int_ord o apply2 snd) end end; diff --git a/src/HOL/Tools/Metis/metis_generate.ML b/src/HOL/Tools/Metis/metis_generate.ML --- a/src/HOL/Tools/Metis/metis_generate.ML +++ b/src/HOL/Tools/Metis/metis_generate.ML @@ -1,243 +1,240 @@ (* Title: HOL/Tools/Metis/metis_generate.ML Author: Jia Meng, Cambridge University Computer Laboratory and NICTA Author: Kong W. Susanto, Cambridge University Computer Laboratory Author: Lawrence C. Paulson, Cambridge University Computer Laboratory Author: Jasmin Blanchette, TU Muenchen Translation of HOL to FOL for Metis. 
*) signature METIS_GENERATE = sig type type_enc = ATP_Problem_Generate.type_enc datatype isa_thm = Isa_Reflexive_or_Trivial | Isa_Lambda_Lifted | Isa_Raw of thm val metis_equal : string val metis_predicator : string val metis_app_op : string val metis_systematic_type_tag : string val metis_ad_hoc_type_tag : string val metis_generated_var_prefix : string val trace : bool Config.T val verbose : bool Config.T val trace_msg : Proof.context -> (unit -> string) -> unit val verbose_warning : Proof.context -> string -> unit val metis_name_table : ((string * int) * ((type_enc -> string) * bool)) list val reveal_old_skolem_terms : (string * term) list -> term -> term val reveal_lam_lifted : (string * term) list -> term -> term val generate_metis_problem : Proof.context -> type_enc -> string -> thm list -> thm list -> int Symtab.table * (Metis_Thm.thm * isa_thm) list * (unit -> (string * int) list) * ((string * term) list * (string * term) list) end structure Metis_Generate : METIS_GENERATE = struct open ATP_Problem open ATP_Problem_Generate val metis_equal = "=" val metis_predicator = "{}" val metis_app_op = Metis_Name.toString Metis_Term.appName val metis_systematic_type_tag = Metis_Name.toString Metis_Term.hasTypeFunctionName val metis_ad_hoc_type_tag = "**" val metis_generated_var_prefix = "_" val trace = Attrib.setup_config_bool \<^binding>\metis_trace\ (K false) val verbose = Attrib.setup_config_bool \<^binding>\metis_verbose\ (K true) fun trace_msg ctxt msg = if Config.get ctxt trace then tracing (msg ()) else () fun verbose_warning ctxt msg = if Config.get ctxt verbose then warning ("Metis: " ^ msg) else () val metis_name_table = [((tptp_equal, 2), (K metis_equal, false)), ((tptp_old_equal, 2), (K metis_equal, false)), ((prefixed_predicator_name, 1), (K metis_predicator, false)), ((prefixed_app_op_name, 2), (K metis_app_op, false)), ((prefixed_type_tag_name, 2), (fn type_enc => if level_of_type_enc type_enc = All_Types then metis_systematic_type_tag else metis_ad_hoc_type_tag, true))] fun old_skolem_const_name i j num_T_args = Long_Name.implode (old_skolem_const_prefix :: map string_of_int [i, j, num_T_args]) fun conceal_old_skolem_terms i old_skolems t = if exists_Const (curry (op =) \<^const_name>\Meson.skolem\ o fst) t then let fun aux old_skolems (t as \<^Const_>\Meson.skolem T for _\) = let val (old_skolems, s) = if i = ~1 then (old_skolems, \<^const_name>\undefined\) else (case AList.find (op aconv) old_skolems t of s :: _ => (old_skolems, s) | [] => let val s = old_skolem_const_name i (length old_skolems) (length (Term.add_tvarsT T [])) in ((s, t) :: old_skolems, s) end) in (old_skolems, Const (s, T)) end | aux old_skolems (t1 $ t2) = let val (old_skolems, t1) = aux old_skolems t1 val (old_skolems, t2) = aux old_skolems t2 in (old_skolems, t1 $ t2) end | aux old_skolems (Abs (s, T, t')) = let val (old_skolems, t') = aux old_skolems t' in (old_skolems, Abs (s, T, t')) end | aux old_skolems t = (old_skolems, t) in aux old_skolems t end else (old_skolems, t) fun reveal_old_skolem_terms old_skolems = map_aterms (fn t as Const (s, _) => if String.isPrefix old_skolem_const_prefix s then AList.lookup (op =) old_skolems s |> the |> map_types (map_type_tvar (K dummyT)) else t | t => t) fun reveal_lam_lifted lifted = map_aterms (fn t as Const (s, _) => if String.isPrefix lam_lifted_prefix s then (case AList.lookup (op =) lifted s of SOME t => \<^Const>\Metis.lambda dummyT\ $ map_types (map_type_tvar (K dummyT)) (reveal_lam_lifted lifted t) | NONE => t) else t | t => t) (* 
------------------------------------------------------------------------- *) (* Logic maps manage the interface between HOL and first-order logic. *) (* ------------------------------------------------------------------------- *) datatype isa_thm = Isa_Reflexive_or_Trivial | Isa_Lambda_Lifted | Isa_Raw of thm val proxy_defs = map (fst o snd o snd) proxy_table fun prepare_helper ctxt = Meson.make_meta_clause ctxt #> rewrite_rule ctxt (map safe_mk_meta_eq proxy_defs) fun metis_term_of_atp type_enc (ATerm ((s, []), tms)) = if is_tptp_variable s then Metis_Term.Var (Metis_Name.fromString s) else (case AList.lookup (op =) metis_name_table (s, length tms) of SOME (f, swap) => (f type_enc, swap) | NONE => (s, false)) |> (fn (s, swap) => Metis_Term.Fn (Metis_Name.fromString s, tms |> map (metis_term_of_atp type_enc) |> swap ? rev)) fun metis_atom_of_atp type_enc (AAtom tm) = (case metis_term_of_atp type_enc tm of Metis_Term.Fn x => x | _ => raise Fail "non CNF -- expected function") | metis_atom_of_atp _ _ = raise Fail "not CNF -- expected atom" fun metis_literal_of_atp type_enc (AConn (ANot, [phi])) = (false, metis_atom_of_atp type_enc phi) | metis_literal_of_atp type_enc phi = (true, metis_atom_of_atp type_enc phi) fun metis_literals_of_atp type_enc (AConn (AOr, phis)) = maps (metis_literals_of_atp type_enc) phis | metis_literals_of_atp type_enc phi = [metis_literal_of_atp type_enc phi] fun metis_axiom_of_atp ctxt type_enc clauses (Formula ((ident, _), _, phi, _, _)) = let fun some isa = SOME (phi |> metis_literals_of_atp type_enc |> Metis_LiteralSet.fromList |> Metis_Thm.axiom, isa) in if String.isPrefix tags_sym_formula_prefix ident then Isa_Reflexive_or_Trivial |> some else if String.isPrefix conjecture_prefix ident then NONE else if String.isPrefix helper_prefix ident then (case (String.isSuffix typed_helper_suffix ident, space_explode "_" ident) of (needs_fairly_sound, _ :: const :: j :: _) => nth (AList.lookup (op =) (helper_table true) (const, needs_fairly_sound) |> the) (the (Int.fromString j) - 1) |> snd |> prepare_helper ctxt |> Isa_Raw |> some | _ => raise Fail ("malformed helper identifier " ^ quote ident)) else (case try (unprefix fact_prefix) ident of SOME s => let val s = s |> space_explode "_" |> tl |> space_implode "_" in (case Int.fromString s of SOME j => Meson.make_meta_clause ctxt (snd (nth clauses j)) |> Isa_Raw |> some | NONE => if String.isPrefix lam_fact_prefix (unascii_of s) then Isa_Lambda_Lifted |> some else raise Fail ("malformed fact identifier " ^ quote ident)) end | NONE => some (Isa_Raw TrueI)) end | metis_axiom_of_atp _ _ _ _ = raise Fail "not CNF -- expected formula" fun eliminate_lam_wrappers \<^Const_>\Metis.lambda _ for t\ = eliminate_lam_wrappers t | eliminate_lam_wrappers (t $ u) = eliminate_lam_wrappers t $ eliminate_lam_wrappers u | eliminate_lam_wrappers (Abs (s, T, t)) = Abs (s, T, eliminate_lam_wrappers t) | eliminate_lam_wrappers t = t (* Function to generate metis clauses, including comb and type clauses *) fun generate_metis_problem ctxt type_enc lam_trans conj_clauses fact_clauses = let val (conj_clauses, fact_clauses) = if is_type_enc_polymorphic type_enc then (conj_clauses, fact_clauses) else conj_clauses @ fact_clauses |> map (pair 0) |> Monomorph.monomorph atp_schematic_consts_of ctxt |> chop (length conj_clauses) |> apply2 (maps (map (zero_var_indexes o snd))) - val num_conjs = length conj_clauses (* Pretend every clause is a "simp" rule, to guide the term ordering. 
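(The "simp" status is picked up by "atp_problem_term_order_info" in "ATP_Problem_Generate", which orients such equations from left to right when deriving the KBO/LPO precedence.)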
*) val clauses = - map2 (fn j => pair (Int.toString j, (Local, Simp))) (0 upto num_conjs - 1) conj_clauses @ - map2 (fn j => pair (Int.toString (num_conjs + j), (Local, Simp))) - (0 upto length fact_clauses - 1) fact_clauses + map_index (apfst (fn j => (Int.toString j, (Local, Simp)))) (conj_clauses @ fact_clauses) val (old_skolems, props) = fold_rev (fn (name, th) => fn (old_skolems, props) => th |> Thm.prop_of |> Logic.strip_imp_concl |> conceal_old_skolem_terms (length clauses) old_skolems ||> lam_trans = liftingN ? eliminate_lam_wrappers ||> (fn prop => (name, prop) :: props)) clauses ([], []) (* val _ = tracing ("PROPS:\n" ^ cat_lines (map (Syntax.string_of_term ctxt o snd) props)) *) val lam_trans = if lam_trans = combsN then no_lamsN else lam_trans val (atp_problem, _, lifted, sym_tab) = generate_atp_problem ctxt true CNF Hypothesis type_enc Metis lam_trans false false false [] \<^prop>\False\ props (* val _ = tracing ("ATP PROBLEM: " ^ cat_lines (lines_of_atp_problem CNF atp_problem)) *) (* "rev" is for compatibility with existing proof scripts. *) val axioms = atp_problem |> maps (map_filter (metis_axiom_of_atp ctxt type_enc clauses) o snd) |> rev fun ord_info () = atp_problem_term_order_info atp_problem in (sym_tab, axioms, ord_info, (lifted, old_skolems)) end end; diff --git a/src/HOL/Tools/Metis/metis_reconstruct.ML b/src/HOL/Tools/Metis/metis_reconstruct.ML --- a/src/HOL/Tools/Metis/metis_reconstruct.ML +++ b/src/HOL/Tools/Metis/metis_reconstruct.ML @@ -1,757 +1,757 @@ (* Title: HOL/Tools/Metis/metis_reconstruct.ML Author: Kong W. Susanto, Cambridge University Computer Laboratory Author: Lawrence C. Paulson, Cambridge University Computer Laboratory Author: Jasmin Blanchette, TU Muenchen Copyright Cambridge University 2007 Proof reconstruction for Metis. *) signature METIS_RECONSTRUCT = sig type type_enc = ATP_Problem_Generate.type_enc exception METIS_RECONSTRUCT of string * string val hol_clause_of_metis : Proof.context -> type_enc -> int Symtab.table -> (string * term) list * (string * term) list -> Metis_Thm.thm -> term val lookth : (Metis_Thm.thm * 'a) list -> Metis_Thm.thm -> 'a val replay_one_inference : Proof.context -> type_enc -> (string * term) list * (string * term) list -> int Symtab.table -> Metis_Thm.thm * Metis_Proof.inference -> (Metis_Thm.thm * thm) list -> (Metis_Thm.thm * thm) list val discharge_skolem_premises : Proof.context -> (thm * term) option list -> thm -> thm end; structure Metis_Reconstruct : METIS_RECONSTRUCT = struct open ATP_Problem open ATP_Problem_Generate open ATP_Proof_Reconstruct open Metis_Generate exception METIS_RECONSTRUCT of string * string val meta_not_not = @{thms not_not[THEN eq_reflection]} fun atp_name_of_metis type_enc s = (case find_first (fn (_, (f, _)) => f type_enc = s) metis_name_table of SOME ((s, _), (_, swap)) => (s, swap) | _ => (s, false)) fun atp_term_of_metis type_enc (Metis_Term.Fn (s, tms)) = let val (s, swap) = atp_name_of_metis type_enc (Metis_Name.toString s) in ATerm ((s, []), tms |> map (atp_term_of_metis type_enc) |> swap ? rev) end | atp_term_of_metis _ (Metis_Term.Var s) = ATerm ((Metis_Name.toString s, []), []) fun hol_term_of_metis ctxt type_enc sym_tab = atp_term_of_metis type_enc #> term_of_atp ctxt ATP_Problem.CNF type_enc false sym_tab NONE fun atp_literal_of_metis type_enc (pos, atom) = atom |> Metis_Term.Fn |> atp_term_of_metis type_enc |> AAtom |> not pos ? 
mk_anot fun atp_clause_of_metis _ [] = AAtom (ATerm ((tptp_false, []), [])) | atp_clause_of_metis type_enc lits = lits |> map (atp_literal_of_metis type_enc) |> mk_aconns AOr fun polish_hol_terms ctxt (lifted, old_skolems) = map (reveal_lam_lifted lifted #> reveal_old_skolem_terms old_skolems) #> Syntax.check_terms (Proof_Context.set_mode Proof_Context.mode_pattern ctxt) fun hol_clause_of_metis ctxt type_enc sym_tab concealed = Metis_Thm.clause #> Metis_LiteralSet.toList #> atp_clause_of_metis type_enc #> prop_of_atp ctxt ATP_Problem.CNF type_enc false sym_tab #> singleton (polish_hol_terms ctxt concealed) fun hol_terms_of_metis ctxt type_enc concealed sym_tab fol_tms = let val ts = map (hol_term_of_metis ctxt type_enc sym_tab) fol_tms val _ = trace_msg ctxt (fn () => " calling type inference:") val _ = List.app (fn t => trace_msg ctxt (fn () => Syntax.string_of_term ctxt t)) ts val ts' = ts |> polish_hol_terms ctxt concealed val _ = List.app (fn t => trace_msg ctxt (fn () => " final term: " ^ Syntax.string_of_term ctxt t ^ " of type " ^ Syntax.string_of_typ ctxt (type_of t))) ts' in ts' end (** FOL step Inference Rules **) fun lookth th_pairs fol_th = (case AList.lookup (uncurry Metis_Thm.equal) th_pairs fol_th of SOME th => th | NONE => raise Fail ("Failed to find Metis theorem " ^ Metis_Thm.toString fol_th)) fun cterm_incr_types ctxt idx = Thm.cterm_of ctxt o map_types (Logic.incr_tvar idx) (* INFERENCE RULE: AXIOM *) (*This causes variables to have an index of 1 by default. See also "term_of_atp" in "ATP_Proof_Reconstruct".*) val axiom_inference = Thm.incr_indexes 1 oo lookth (* INFERENCE RULE: ASSUME *) fun excluded_middle P = \<^instantiate>\P in lemma (open) \P \ \ P \ False\ by (rule notE)\ fun assume_inference ctxt type_enc concealed sym_tab atom = singleton (hol_terms_of_metis ctxt type_enc concealed sym_tab) (Metis_Term.Fn atom) |> Thm.cterm_of ctxt |> excluded_middle (* INFERENCE RULE: INSTANTIATE (SUBST). *) (*Type instantiations are ignored. Trying to reconstruct them admits new possibilities of errors, e.g. concerning sorts. 
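(Metis itself is untyped, so the substitution it reports carries no trustworthy type or sort information in the first place.)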
Instead we try to arrange that new TVars are distinct and that types can be inferred from terms.*) fun inst_inference ctxt type_enc concealed sym_tab th_pairs fsubst th = let val i_th = lookth th_pairs th val i_th_vars = Term.add_vars (Thm.prop_of i_th) [] fun find_var x = the (List.find (fn ((a,_),_) => a=x) i_th_vars) fun subst_translation (x,y) = let val v = find_var x (*We call "polish_hol_terms" below.*) val t = hol_term_of_metis ctxt type_enc sym_tab y in SOME (Thm.cterm_of ctxt (Var v), t) end handle Option.Option => (trace_msg ctxt (fn () => "\"find_var\" failed for " ^ x ^ " in " ^ Thm.string_of_thm ctxt i_th); NONE) | TYPE _ => (trace_msg ctxt (fn () => "\"hol_term_of_metis\" failed for " ^ x ^ " in " ^ Thm.string_of_thm ctxt i_th); NONE) fun remove_typeinst (a, t) = let val a = Metis_Name.toString a in (case unprefix_and_unascii schematic_var_prefix a of SOME b => SOME (b, t) | NONE => (case unprefix_and_unascii tvar_prefix a of SOME _ => NONE (*type instantiations are forbidden*) | NONE => SOME (a, t) (*internal Metis var?*))) end val _ = trace_msg ctxt (fn () => " isa th: " ^ Thm.string_of_thm ctxt i_th) val substs = map_filter remove_typeinst (Metis_Subst.toList fsubst) val (vars, tms) = ListPair.unzip (map_filter subst_translation substs) ||> polish_hol_terms ctxt concealed val ctm_of = cterm_incr_types ctxt (Thm.maxidx_of i_th + 1) val substs' = ListPair.zip (vars, map ctm_of tms) val _ = trace_msg ctxt (fn () => cat_lines ("subst_translations:" :: (substs' |> map (fn (x, y) => Syntax.string_of_term ctxt (Thm.term_of x) ^ " |-> " ^ Syntax.string_of_term ctxt (Thm.term_of y))))) in infer_instantiate_types ctxt (map (apfst (dest_Var o Thm.term_of)) substs') i_th end handle THM (msg, _, _) => raise METIS_RECONSTRUCT ("inst_inference", msg) | ERROR msg => raise METIS_RECONSTRUCT ("inst_inference", msg) (* INFERENCE RULE: RESOLVE *) (*Increment the indexes of only the type variables*) fun incr_type_indexes ctxt inc th = let val tvs = Term.add_tvars (Thm.full_prop_of th) [] fun inc_tvar ((a, i), s) = (((a, i), s), Thm.ctyp_of ctxt (TVar ((a, i + inc), s))) in Thm.instantiate (TVars.make (map inc_tvar tvs), Vars.empty) th end (*Like RSN, but we rename apart only the type variables. Vars here typically have an index of 1, and the use of RSN would increase this typically to 3. Instantiations of those Vars could then fail.*) fun resolve_inc_tyvars ctxt th1 i th2 = let val th1' = incr_type_indexes ctxt (Thm.maxidx_of th2 + 1) th1 fun res (tha, thb) = (case Thm.bicompose (SOME ctxt) {flatten = true, match = false, incremented = true} (false, Thm.close_derivation \<^here> tha, Thm.nprems_of tha) i thb |> Seq.list_of |> distinct Thm.eq_thm of [th] => th | _ => let val thaa'bb' as [(tha', _), (thb', _)] = map (`(Local_Defs.unfold0 ctxt meta_not_not)) [tha, thb] in if forall Thm.eq_thm_prop thaa'bb' then raise THM ("resolve_inc_tyvars: unique result expected", i, [tha, thb]) else res (tha', thb') end) in res (th1', th2) handle TERM z => let val ps = [] |> fold (Term.add_vars o Thm.prop_of) [th1', th2] |> AList.group (op =) |> maps (fn ((s, _), T :: Ts) => map (fn T' => (Free (s, T), Free (s, T'))) Ts) |> rpair Envir.init |-> fold (Pattern.unify (Context.Proof ctxt)) |> Envir.type_env |> Vartab.dest |> map (fn (x, (S, T)) => ((x, S), Thm.ctyp_of ctxt T)) in (*The unifier, which is invoked from "Thm.bicompose", will sometimes refuse to unify "?a::?'a" with "?a::?'b" or "?a::nat" and throw a "TERM" exception (with "add_ffpair" as first argument).
We then perform unification of the types of variables by hand and try again. We could do this the first time around but this error occurs seldom and we don't want to break existing proofs in subtle ways or slow them down.*) if null ps then raise TERM z else res (apply2 (Drule.instantiate_normalize (TVars.make ps, Vars.empty)) (th1', th2)) end end fun s_not \<^Const_>\Not for t\ = t | s_not t = HOLogic.mk_not t fun simp_not_not \<^Const_>\Trueprop for t\ = \<^Const>\Trueprop for \simp_not_not t\\ | simp_not_not \<^Const_>\Not for t\ = s_not (simp_not_not t) | simp_not_not t = t val normalize_literal = simp_not_not o Envir.eta_contract (*Find the relative location of an untyped term within a list of terms as a 1-based index. Returns 0 in case of failure.*) fun index_of_literal lit haystack = let fun match_lit normalize = HOLogic.dest_Trueprop #> normalize #> curry Term.aconv_untyped (lit |> normalize) in (case find_index (match_lit I) haystack of ~1 => find_index (match_lit (simp_not_not o Envir.eta_contract)) haystack | j => j) + 1 end (*Permute a rule's premises to move the i-th premise to the last position.*) fun make_last i th = let val n = Thm.nprems_of th in if i >= 1 andalso i <= n then Thm.permute_prems (i - 1) 1 th else raise THM ("select_literal", i, [th]) end (*Maps a rule that ends "... ==> P ==> False" to "... ==> ~ P" while avoiding the creation of double negations. The "select" wrapper is a trick to ensure that "P ==> ~ False ==> False" is rewritten to "P ==> False", not to "~ P". We don't use this trick in general because it makes the proof object uglier than necessary. FIXME.*) fun negate_head ctxt th = if exists (fn t => t aconv \<^prop>\\ False\) (Thm.prems_of th) then (th RS @{thm select_FalseI}) |> fold (rewrite_rule ctxt o single) @{thms not_atomize_select atomize_not_select} else th |> fold (rewrite_rule ctxt o single) @{thms not_atomize atomize_not} (* Maps the clause [P1,...Pn]==>False to [P1,...,P(i-1),P(i+1),...Pn] ==> ~P *) fun select_literal ctxt = negate_head ctxt oo make_last fun resolve_inference ctxt type_enc concealed sym_tab th_pairs atom th1 th2 = let val (i_th1, i_th2) = apply2 (lookth th_pairs) (th1, th2) val _ = trace_msg ctxt (fn () => " isa th1 (pos): " ^ Thm.string_of_thm ctxt i_th1 ^ "\n\ \ isa th2 (neg): " ^ Thm.string_of_thm ctxt i_th2) in (* Trivial cases where one operand is type info *) if Thm.eq_thm (TrueI, i_th1) then i_th2 else if Thm.eq_thm (TrueI, i_th2) then i_th1 else let val i_atom = singleton (hol_terms_of_metis ctxt type_enc concealed sym_tab) (Metis_Term.Fn atom) val _ = trace_msg ctxt (fn () => " atom: " ^ Syntax.string_of_term ctxt i_atom) in (case index_of_literal (s_not i_atom) (Thm.prems_of i_th1) of 0 => (trace_msg ctxt (fn () => "Failed to find literal in \"th1\""); i_th1) | j1 => (trace_msg ctxt (fn () => " index th1: " ^ string_of_int j1); (case index_of_literal i_atom (Thm.prems_of i_th2) of 0 => (trace_msg ctxt (fn () => "Failed to find literal in \"th2\""); i_th2) | j2 => (trace_msg ctxt (fn () => " index th2: " ^ string_of_int j2); resolve_inc_tyvars ctxt (select_literal ctxt j1 i_th1) j2 i_th2 handle TERM (s, _) => raise METIS_RECONSTRUCT ("resolve_inference", s))))) end end (* INFERENCE RULE: REFL *) val REFL_THM = Thm.incr_indexes 2 @{lemma "x \ x \ False" by (drule notE) (rule refl)} val [refl_x] = Term.add_vars (Thm.prop_of REFL_THM) []; fun refl_inference ctxt type_enc concealed sym_tab t = let val i_t = singleton (hol_terms_of_metis ctxt type_enc concealed sym_tab) t val _ = trace_msg ctxt (fn () => " term: " ^
Syntax.string_of_term ctxt i_t) val c_t = cterm_incr_types ctxt (Thm.maxidx_of REFL_THM + 1) i_t in infer_instantiate_types ctxt [(refl_x, c_t)] REFL_THM end (* INFERENCE RULE: EQUALITY *) val subst_em = @{lemma "s = t \ P s \ \ P t \ False" by (erule notE) (erule subst)} val ssubst_em = @{lemma "s = t \ P t \ \ P s \ False" by (erule notE) (erule ssubst)} fun equality_inference ctxt type_enc concealed sym_tab (pos, atom) fp fr = let val m_tm = Metis_Term.Fn atom val [i_atom, i_tm] = hol_terms_of_metis ctxt type_enc concealed sym_tab [m_tm, fr] val _ = trace_msg ctxt (fn () => "sign of the literal: " ^ Bool.toString pos) fun replace_item_list lx 0 (_::ls) = lx::ls | replace_item_list lx i (l::ls) = l :: replace_item_list lx (i-1) ls fun path_finder_fail tm ps t = raise METIS_RECONSTRUCT ("equality_inference (path_finder)", "path = " ^ space_implode " " (map string_of_int ps) ^ " isa-term: " ^ Syntax.string_of_term ctxt tm ^ (case t of SOME t => " fol-term: " ^ Metis_Term.toString t | NONE => "")) fun path_finder tm [] _ = (tm, Bound 0) | path_finder tm (p :: ps) (t as Metis_Term.Fn (s, ts)) = let val s = s |> Metis_Name.toString |> perhaps (try (unprefix_and_unascii const_prefix #> the #> unmangled_const_name #> hd)) in if s = metis_predicator orelse s = predicator_name orelse s = metis_systematic_type_tag orelse s = metis_ad_hoc_type_tag orelse s = type_tag_name then path_finder tm ps (nth ts p) else if s = metis_app_op orelse s = app_op_name then let val (tm1, tm2) = dest_comb tm val p' = p - (length ts - 2) in if p' = 0 then path_finder tm1 ps (nth ts p) ||> (fn y => y $ tm2) else path_finder tm2 ps (nth ts p) ||> (fn y => tm1 $ y) end else let val (tm1, args) = strip_comb tm val adjustment = length ts - length args val p' = if adjustment > p then p else p - adjustment val tm_p = nth args p' handle General.Subscript => path_finder_fail tm (p :: ps) (SOME t) val _ = trace_msg ctxt (fn () => "path_finder: " ^ string_of_int p ^ " " ^ Syntax.string_of_term ctxt tm_p) val (r, t) = path_finder tm_p ps (nth ts p) in (r, list_comb (tm1, replace_item_list t p' args)) end end | path_finder tm ps t = path_finder_fail tm ps (SOME t) val (tm_subst, body) = path_finder i_atom fp m_tm val tm_abs = Abs ("x", type_of tm_subst, body) val _ = trace_msg ctxt (fn () => "abstraction: " ^ Syntax.string_of_term ctxt tm_abs) val _ = trace_msg ctxt (fn () => "i_tm: " ^ Syntax.string_of_term ctxt i_tm) val _ = trace_msg ctxt (fn () => "located term: " ^ Syntax.string_of_term ctxt tm_subst) val maxidx = fold Term.maxidx_term [i_tm, tm_abs, tm_subst] ~1 val subst' = Thm.incr_indexes (maxidx + 1) (if pos then subst_em else ssubst_em) val _ = trace_msg ctxt (fn () => "subst' " ^ Thm.string_of_thm ctxt subst') val eq_terms = map (apply2 (Thm.cterm_of ctxt)) (ListPair.zip (Misc_Legacy.term_vars (Thm.prop_of subst'), [tm_abs, tm_subst, i_tm])) in infer_instantiate_types ctxt (map (apfst (dest_Var o Thm.term_of)) eq_terms) subst' end val factor = Seq.hd o distinct_subgoals_tac fun one_step ctxt type_enc concealed sym_tab th_pairs (fol_th, inference) = (case inference of Metis_Proof.Axiom _ => axiom_inference th_pairs fol_th |> factor | Metis_Proof.Assume atom => assume_inference ctxt type_enc concealed sym_tab atom | Metis_Proof.Metis_Subst (subst, th1) => inst_inference ctxt type_enc concealed sym_tab th_pairs subst th1 |> factor | Metis_Proof.Resolve (atom, th1, th2) => resolve_inference ctxt type_enc concealed sym_tab th_pairs atom th1 th2 |> factor | Metis_Proof.Refl tm => refl_inference ctxt type_enc concealed 
sym_tab tm | Metis_Proof.Equality (lit, p, r) => equality_inference ctxt type_enc concealed sym_tab lit p r) fun flexflex_first_order ctxt th = (case Thm.tpairs_of th of [] => th | pairs => let val thy = Proof_Context.theory_of ctxt val (tyenv, tenv) = fold (Pattern.first_order_match thy) pairs (Vartab.empty, Vartab.empty) fun mkT (v, (S, T)) = ((v, S), Thm.ctyp_of ctxt T) fun mk (v, (T, t)) = ((v, Envir.subst_type tyenv T), Thm.cterm_of ctxt t) val instsT = Vartab.fold (cons o mkT) tyenv [] val insts = Vartab.fold (cons o mk) tenv [] in Thm.instantiate (TVars.make instsT, Vars.make insts) th end handle THM _ => th) fun is_metis_literal_genuine (_, (s, _)) = not (String.isPrefix class_prefix (Metis_Name.toString s)) fun is_isabelle_literal_genuine t = (case t of _ $ \<^Const_>\Meson.skolem _ for _\ => false | _ => true) fun count p xs = fold (fn x => if p x then Integer.add 1 else I) xs 0 (*Seldom needed hack. A Metis clause is represented as a set, so duplicate disjuncts are impossible. In the Isabelle proof, in spite of efforts to eliminate them, duplicates sometimes appear with slightly different (but unifiable) types.*) fun resynchronize ctxt fol_th th = let val num_metis_lits = count is_metis_literal_genuine (Metis_LiteralSet.toList (Metis_Thm.clause fol_th)) val num_isabelle_lits = count is_isabelle_literal_genuine (Thm.prems_of th) in if num_metis_lits >= num_isabelle_lits then th else let val (prems0, concl) = th |> Thm.prop_of |> Logic.strip_horn val prems = prems0 |> map normalize_literal |> distinct Term.aconv_untyped val goal = Logic.list_implies (prems, concl) val ctxt' = fold Thm.declare_hyps (Thm.chyps_of th) ctxt val tac = cut_tac th 1 THEN rewrite_goals_tac ctxt' meta_not_not THEN ALLGOALS (assume_tac ctxt') in if length prems = length prems0 then raise METIS_RECONSTRUCT ("resynchronize", "Out of sync") else Goal.prove ctxt' [] [] goal (K tac) |> resynchronize ctxt' fol_th end end fun replay_one_inference ctxt type_enc concealed sym_tab (fol_th, inf) th_pairs = if not (null th_pairs) andalso Thm.prop_of (snd (hd th_pairs)) aconv \<^prop>\False\ then (*Isabelle sometimes identifies literals (premises) that are distinct in Metis (e.g., because of type variables). We give the Isabelle proof the benefit of the doubt.*) th_pairs else let val _ = trace_msg ctxt (fn () => "=============================================") val _ = trace_msg ctxt (fn () => "METIS THM: " ^ Metis_Thm.toString fol_th) val _ = trace_msg ctxt (fn () => "INFERENCE: " ^ Metis_Proof.inferenceToString inf) val th = one_step ctxt type_enc concealed sym_tab th_pairs (fol_th, inf) |> flexflex_first_order ctxt |> resynchronize ctxt fol_th val _ = trace_msg ctxt (fn () => "ISABELLE THM: " ^ Thm.string_of_thm ctxt th) val _ = trace_msg ctxt (fn () => "=============================================") in (fol_th, th) :: th_pairs end (*It is normally sufficient to apply "assume_tac" to unify the conclusion with one of the premises. Unfortunately, this sometimes yields "Variable has two distinct types" errors. To avoid this, we instantiate the variables before applying "assume_tac". Typical constraints are of the form ?SK_a_b_c_x SK_d_e_f_y ... SK_a_b_c_x ... SK_g_h_i_z \<^sup>?
SK_a_b_c_x, where the nonvariables are goal parameters.*) fun unify_first_prem_with_concl ctxt i th = let val goal = Logic.get_goal (Thm.prop_of th) i |> Envir.beta_eta_contract val prem = goal |> Logic.strip_assums_hyp |> hd val concl = goal |> Logic.strip_assums_concl fun pair_untyped_aconv (t1, t2) (u1, u2) = Term.aconv_untyped (t1, u1) andalso Term.aconv_untyped (t2, u2) fun add_terms tp inst = if exists (pair_untyped_aconv tp) inst then inst else tp :: map (apsnd (subst_atomic [tp])) inst fun is_flex t = (case strip_comb t of (Var _, args) => forall is_Bound args | _ => false) fun unify_flex flex rigid = (case strip_comb flex of (Var (z as (_, T)), args) => add_terms (Var z, fold_rev absdummy (take (length args) (binder_types T)) rigid) | _ => I) fun unify_potential_flex comb atom = if is_flex comb then unify_flex comb atom else if is_Var atom then add_terms (atom, comb) else I fun unify_terms (t, u) = (case (t, u) of (t1 $ t2, u1 $ u2) => if is_flex t then unify_flex t u else if is_flex u then unify_flex u t else fold unify_terms [(t1, u1), (t2, u2)] | (_ $ _, _) => unify_potential_flex t u | (_, _ $ _) => unify_potential_flex u t | (Var _, _) => add_terms (t, u) | (_, Var _) => add_terms (u, t) | _ => I) val t_inst = [] |> try (unify_terms (prem, concl) #> map (apply2 (Thm.cterm_of ctxt))) |> the_default [] (* FIXME *) in infer_instantiate_types ctxt (map (apfst (dest_Var o Thm.term_of)) t_inst) th end val copy_prem = @{lemma "P \ (P \ P \ Q) \ Q" by assumption} fun copy_prems_tac ctxt [] ns i = if forall (curry (op =) 1) ns then all_tac else copy_prems_tac ctxt (rev ns) [] i | copy_prems_tac ctxt (1 :: ms) ns i = rotate_tac 1 i THEN copy_prems_tac ctxt ms (1 :: ns) i | copy_prems_tac ctxt (m :: ms) ns i = eresolve_tac ctxt [copy_prem] i THEN copy_prems_tac ctxt ms (m div 2 :: (m + 1) div 2 :: ns) i (*Metis generates variables of the form _nnn.*) val is_metis_fresh_variable = String.isPrefix "_" fun instantiate_forall_tac ctxt t i st = let val params = Logic.strip_params (Logic.get_goal (Thm.prop_of st) i) |> rev fun repair (t as (Var ((s, _), _))) = (case find_index (fn (s', _) => s' = s) params of ~1 => t | j => Bound j) | repair (t $ u) = (case (repair t, repair u) of (t as Bound j, u as Bound k) => (*This is a trick to repair the discrepancy between the fully skolemized term that MESON gives us (where existentials were pulled out) and the reality.*) if k > j then t else t $ u | (t, u) => t $ u) | repair t = t val t' = t |> repair |> fold (absdummy o snd) params fun do_instantiate th = (case Term.add_vars (Thm.prop_of th) [] |> filter_out ((Meson_Clausify.is_zapped_var_name orf is_metis_fresh_variable) o fst o fst) of [] => th | [var as (_, T)] => let val var_binder_Ts = T |> binder_types |> take (length params) |> rev val var_body_T = T |> funpow (length params) range_type val tyenv = Vartab.empty |> Type.raw_unifys (fastype_of t :: map snd params, var_body_T :: var_binder_Ts) val env = Envir.Envir {maxidx = Vartab.fold (Integer.max o snd o fst) tyenv 0, tenv = Vartab.empty, tyenv = tyenv} val ty_inst = Vartab.fold (fn (x, (S, T)) => cons (((x, S), Thm.ctyp_of ctxt T))) tyenv [] val t_inst = [apply2 (Thm.cterm_of ctxt o Envir.norm_term env) (Var var, t')] in Drule.instantiate_normalize (TVars.make ty_inst, Vars.make (map (apfst (dest_Var o Thm.term_of)) t_inst)) th end | _ => raise Fail "expected a single non-zapped, non-Metis Var") in (DETERM (eresolve_tac ctxt @{thms allE} i THEN rotate_tac ~1 i) THEN PRIMITIVE do_instantiate) st end fun fix_exists_tac ctxt t = eresolve_tac 
ctxt [exE] THEN' rename_tac [t |> dest_Var |> fst |> fst] fun release_quantifier_tac ctxt (skolem, t) = (if skolem then fix_exists_tac ctxt else instantiate_forall_tac ctxt) t fun release_clusters_tac _ _ _ [] = K all_tac | release_clusters_tac ctxt ax_counts substs ((ax_no, cluster_no) :: clusters) = let val cluster_of_var = Meson_Clausify.cluster_of_zapped_var_name o fst o fst o dest_Var fun in_right_cluster ((_, (cluster_no', _)), _) = cluster_no' = cluster_no val cluster_substs = substs |> map_filter (fn (ax_no', (_, (_, tsubst))) => if ax_no' = ax_no then tsubst |> map (apfst cluster_of_var) |> filter (in_right_cluster o fst) |> map (apfst snd) |> SOME else NONE) fun do_cluster_subst cluster_subst = map (release_quantifier_tac ctxt) cluster_subst @ [rotate_tac 1] val first_prem = find_index (fn (ax_no', _) => ax_no' = ax_no) substs in rotate_tac first_prem THEN' (EVERY' (maps do_cluster_subst cluster_substs)) THEN' rotate_tac (~ first_prem - length cluster_substs) THEN' release_clusters_tac ctxt ax_counts substs clusters end fun cluster_key ((ax_no, (cluster_no, index_no)), skolem) = (ax_no, (cluster_no, (skolem, index_no))) fun cluster_ord p = prod_ord int_ord (prod_ord int_ord (prod_ord bool_ord int_ord)) (apply2 cluster_key p) val tysubst_ord = list_ord (prod_ord Term_Ord.fast_indexname_ord (prod_ord Term_Ord.sort_ord Term_Ord.typ_ord)) structure Int_Tysubst_Table = Table ( type key = int * (indexname * (sort * typ)) list val ord = prod_ord int_ord tysubst_ord ) structure Int_Pair_Graph = Graph( type key = int * int val ord = prod_ord int_ord int_ord ) fun shuffle_key (((axiom_no, (_, index_no)), _), _) = (axiom_no, index_no) fun shuffle_ord p = prod_ord int_ord int_ord (apply2 shuffle_key p) (*Attempts to derive the theorem "False" from a theorem of the form "P1 ==> ... ==> Pn ==> False", where the "Pi"s are to be discharged using the specified axioms. The axioms have leading "All" and "Ex" quantifiers, which must be eliminated first.*) fun discharge_skolem_premises ctxt axioms prems_imp_false = if Thm.prop_of prems_imp_false aconv \<^prop>\False\ then prems_imp_false else let val thy = Proof_Context.theory_of ctxt fun match_term p = let val (tyenv, tenv) = Pattern.first_order_match thy p (Vartab.empty, Vartab.empty) val tsubst = tenv |> Vartab.dest |> filter (Meson_Clausify.is_zapped_var_name o fst o fst) |> sort (cluster_ord o apply2 (Meson_Clausify.cluster_of_zapped_var_name o fst o fst)) |> map (fn (xi, (T, t)) => apply2 (Envir.subst_term_types tyenv) (Var (xi, T), t)) val tysubst = tyenv |> Vartab.dest in (tysubst, tsubst) end fun subst_info_of_prem subgoal_no prem = (case prem of _ $ \<^Const_>\Meson.skolem _ for \_ $ t $ num\\ => let val ax_no = HOLogic.dest_nat num in (ax_no, (subgoal_no, match_term (nth axioms ax_no |> the |> snd, t))) end | _ => raise TERM ("discharge_skolem_premises: Malformed premise", [prem])) fun cluster_of_var_name skolem s = (case try Meson_Clausify.cluster_of_zapped_var_name s of NONE => NONE | SOME ((ax_no, (cluster_no, _)), skolem') => if skolem' = skolem andalso cluster_no > 0 then SOME (ax_no, cluster_no) else NONE) fun clusters_in_term skolem t = Term.add_var_names t [] |> map_filter (cluster_of_var_name skolem o fst) fun deps_of_term_subst (var, t) = (case clusters_in_term false var of [] => NONE | [(ax_no, cluster_no)] => SOME ((ax_no, cluster_no), clusters_in_term true t |> cluster_no > 1 ? 
cons (ax_no, cluster_no - 1)) | _ => raise TERM ("discharge_skolem_premises: Expected Var", [var])) val prems = Logic.strip_imp_prems (Thm.prop_of prems_imp_false) - val substs = prems |> map2 subst_info_of_prem (1 upto length prems) - |> sort (int_ord o apply2 fst) + val substs = + map_index (fn (i, prem) => subst_info_of_prem (i + 1) prem) prems + |> sort (int_ord o apply2 fst) val depss = maps (map_filter deps_of_term_subst o snd o snd o snd) substs val clusters = maps (op ::) depss val ordered_clusters = Int_Pair_Graph.empty |> fold Int_Pair_Graph.default_node (map (rpair ()) clusters) |> fold Int_Pair_Graph.add_deps_acyclic depss |> Int_Pair_Graph.topological_order handle Int_Pair_Graph.CYCLES _ => error "Cannot replay Metis proof in Isabelle without \"Hilbert_Choice\"" val ax_counts = Int_Tysubst_Table.empty |> fold (fn (ax_no, (_, (tysubst, _))) => Int_Tysubst_Table.map_default ((ax_no, tysubst), 0) (Integer.add 1)) substs |> Int_Tysubst_Table.dest val needed_axiom_props = - 0 upto length axioms - 1 ~~ axioms + map_index I axioms |> map_filter (fn (_, NONE) => NONE | (ax_no, SOME (_, t)) => if exists (fn ((ax_no', _), n) => ax_no' = ax_no andalso n > 0) ax_counts then SOME t else NONE) val outer_param_names = [] |> fold Term.add_var_names needed_axiom_props |> filter (Meson_Clausify.is_zapped_var_name o fst) |> map (`(Meson_Clausify.cluster_of_zapped_var_name o fst)) |> filter (fn (((_, (cluster_no, _)), skolem), _) => cluster_no = 0 andalso skolem) |> sort shuffle_ord |> map (fst o snd) (* for debugging only: fun string_of_subst_info (ax_no, (subgoal_no, (tysubst, tsubst))) = "ax: " ^ string_of_int ax_no ^ "; asm: " ^ string_of_int subgoal_no ^ "; tysubst: " ^ @{make_string} tysubst ^ "; tsubst: {" ^ commas (map ((fn (s, t) => s ^ " |-> " ^ t) o apply2 (Syntax.string_of_term ctxt)) tsubst) ^ "}" val _ = tracing ("ORDERED CLUSTERS: " ^ @{make_string} ordered_clusters) val _ = tracing ("AXIOM COUNTS: " ^ @{make_string} ax_counts) val _ = tracing ("OUTER PARAMS: " ^ @{make_string} outer_param_names) val _ = tracing ("SUBSTS (" ^ string_of_int (length substs) ^ "):\n" ^ cat_lines (map string_of_subst_info substs)) *) val ctxt' = fold Thm.declare_hyps (Thm.chyps_of prems_imp_false) ctxt fun cut_and_ex_tac axiom = cut_tac axiom 1 THEN TRY (REPEAT_ALL_NEW (eresolve_tac ctxt' @{thms exE}) 1) fun rotation_of_subgoal i = find_index (fn (_, (subgoal_no, _)) => subgoal_no = i) substs in Goal.prove ctxt' [] [] \<^prop>\False\ (K (DETERM (EVERY (map (cut_and_ex_tac o fst o the o nth axioms o fst o fst) ax_counts) THEN rename_tac outer_param_names 1 THEN copy_prems_tac ctxt' (map snd ax_counts) [] 1) THEN release_clusters_tac ctxt' ax_counts substs ordered_clusters 1 THEN match_tac ctxt' [prems_imp_false] 1 THEN ALLGOALS (fn i => resolve_tac ctxt' @{thms Meson.skolem_COMBK_I} i THEN rotate_tac (rotation_of_subgoal i) i THEN PRIMITIVE (unify_first_prem_with_concl ctxt' i) THEN assume_tac ctxt' i THEN flexflex_tac ctxt'))) handle ERROR msg => cat_error msg "Cannot replay Metis proof in Isabelle: error when discharging Skolem assumptions" end end; diff --git a/src/HOL/Tools/Metis/metis_tactic.ML b/src/HOL/Tools/Metis/metis_tactic.ML --- a/src/HOL/Tools/Metis/metis_tactic.ML +++ b/src/HOL/Tools/Metis/metis_tactic.ML @@ -1,311 +1,311 @@ (* Title: HOL/Tools/Metis/metis_tactic.ML Author: Kong W. Susanto, Cambridge University Computer Laboratory Author: Lawrence C.
Paulson, Cambridge University Computer Laboratory Author: Jasmin Blanchette, TU Muenchen Copyright Cambridge University 2007 HOL setup for the Metis prover. *) signature METIS_TACTIC = sig val trace : bool Config.T val verbose : bool Config.T val new_skolem : bool Config.T val advisory_simp : bool Config.T val metis_tac_unused : string list -> string -> Proof.context -> thm list -> int -> thm -> thm list * thm Seq.seq val metis_tac : string list -> string -> Proof.context -> thm list -> int -> tactic val metis_method : (string list option * string option) * thm list -> Proof.context -> thm list -> tactic val metis_lam_transs : string list val parse_metis_options : (string list option * string option) parser end structure Metis_Tactic : METIS_TACTIC = struct open ATP_Problem_Generate open ATP_Proof_Reconstruct open Metis_Generate open Metis_Reconstruct val new_skolem = Attrib.setup_config_bool \<^binding>\metis_new_skolem\ (K false) val advisory_simp = Attrib.setup_config_bool \<^binding>\metis_advisory_simp\ (K true) (* Designed to work also with monomorphic instances of polymorphic theorems. *) fun have_common_thm ctxt ths1 ths2 = exists (member (Term.aconv_untyped o apply2 Thm.prop_of) ths1) (map (Meson.make_meta_clause ctxt) ths2) (*Determining which axiom clauses are actually used*) fun used_axioms axioms (th, Metis_Proof.Axiom _) = SOME (lookth axioms th) | used_axioms _ _ = NONE (* Lightweight predicate type information comes in two flavors, "t = t'" and "t => t'", where "t" and "t'" are the same term modulo type tags. In Isabelle, type tags are stripped away, so we are left with "t = t" or "t => t". Type tag idempotence is also handled this way. *) fun reflexive_or_trivial_of_metis ctxt type_enc sym_tab concealed mth = (case hol_clause_of_metis ctxt type_enc sym_tab concealed mth of \<^Const_>\HOL.eq _ for _ t\ => let val ct = Thm.cterm_of ctxt t val cT = Thm.ctyp_of_cterm ct in refl |> Thm.instantiate' [SOME cT] [SOME ct] end | \<^Const_>\disj for t1 t2\ => (if can HOLogic.dest_not t1 then t2 else t1) |> HOLogic.mk_Trueprop |> Thm.cterm_of ctxt |> Thm.trivial | _ => raise Fail "expected reflexive or trivial clause") |> Meson.make_meta_clause ctxt fun lam_lifted_of_metis ctxt type_enc sym_tab concealed mth = let val tac = rewrite_goals_tac ctxt @{thms lambda_def [abs_def]} THEN resolve_tac ctxt [refl] 1 val t = hol_clause_of_metis ctxt type_enc sym_tab concealed mth val ct = Thm.cterm_of ctxt (HOLogic.mk_Trueprop t) in Goal.prove_internal ctxt [] ct (K tac) |> Meson.make_meta_clause ctxt end fun add_vars_and_frees (t $ u) = fold (add_vars_and_frees) [t, u] | add_vars_and_frees (Abs (_, _, t)) = add_vars_and_frees t | add_vars_and_frees (t as Var _) = insert (op =) t | add_vars_and_frees (t as Free _) = insert (op =) t | add_vars_and_frees _ = I fun introduce_lam_wrappers ctxt th = if Meson_Clausify.is_quasi_lambda_free (Thm.prop_of th) then th else let fun conv first ctxt ct = if Meson_Clausify.is_quasi_lambda_free (Thm.term_of ct) then Thm.reflexive ct else (case Thm.term_of ct of Abs (_, _, u) => if first then (case add_vars_and_frees u [] of [] => Conv.abs_conv (conv false o snd) ctxt ct |> (fn th => Meson.first_order_resolve ctxt th @{thm Metis.eq_lambdaI}) | v :: _ => Abs (Name.uu, fastype_of v, abstract_over (v, Thm.term_of ct)) $ v |> Thm.cterm_of ctxt |> Conv.comb_conv (conv true ctxt)) else Conv.abs_conv (conv false o snd) ctxt ct | \<^Const_>\Meson.skolem _ for _\ => Thm.reflexive ct | _ => Conv.comb_conv (conv true ctxt) ct) val eq_th = conv true ctxt (Thm.cprop_of th) (* 
We replace the equation's left-hand side with a beta-equivalent term so that "Thm.equal_elim" works below. *) val t0 $ _ $ t2 = Thm.prop_of eq_th val eq_ct = t0 $ Thm.prop_of th $ t2 |> Thm.cterm_of ctxt val eq_th' = Goal.prove_internal ctxt [] eq_ct (K (resolve_tac ctxt [eq_th] 1)) in Thm.equal_elim eq_th' th end fun clause_params ordering = {ordering = ordering, orderLiterals = Metis_Clause.UnsignedLiteralOrder, orderTerms = true} fun active_params ordering = {clause = clause_params ordering, prefactor = #prefactor Metis_Active.default, postfactor = #postfactor Metis_Active.default} val waiting_params = {symbolsWeight = 1.0, variablesWeight = 0.05, literalsWeight = 0.01, models = []} fun resolution_params ordering = {active = active_params ordering, waiting = waiting_params} fun kbo_advisory_simp_ordering ord_info = let fun weight (m, _) = AList.lookup (op =) ord_info (Metis_Name.toString m) |> the_default 1 fun precedence p = (case int_ord (apply2 weight p) of EQUAL => #precedence Metis_KnuthBendixOrder.default p | ord => ord) in {weight = weight, precedence = precedence} end fun metis_call type_enc lam_trans = let val type_enc = (case AList.find (fn (enc, encs) => enc = hd encs) type_enc_aliases type_enc of [alias] => alias | _ => type_enc) val opts = [] |> type_enc <> partial_typesN ? cons type_enc |> lam_trans <> default_metis_lam_trans ? cons lam_trans in metisN ^ (if null opts then "" else " (" ^ commas opts ^ ")") end exception METIS_UNPROVABLE of unit (* Main function to start Metis proof and reconstruction *) fun FOL_SOLVE unused type_encs lam_trans ctxt cls ths0 = let val thy = Proof_Context.theory_of ctxt val new_skolem = Config.get ctxt new_skolem orelse null (Meson.choice_theorems thy) val do_lams = lam_trans = liftingN ? introduce_lam_wrappers ctxt val th_cls_pairs = - map2 (fn j => fn th => + map_index (fn (j, th) => (Thm.get_name_hint th, th |> Drule.eta_contraction_rule |> Meson_Clausify.cnf_axiom Meson.simp_options_all_true ctxt new_skolem (lam_trans = combsN) j ||> map do_lams)) - (0 upto length ths0 - 1) ths0 + ths0 val ths = maps (snd o snd) th_cls_pairs val dischargers = map (fst o snd) th_cls_pairs val cls = cls |> map (Drule.eta_contraction_rule #> do_lams) val _ = trace_msg ctxt (K "FOL_SOLVE: CONJECTURE CLAUSES") val _ = List.app (fn th => trace_msg ctxt (fn () => Thm.string_of_thm ctxt th)) cls val type_enc :: fallback_type_encs = type_encs val _ = trace_msg ctxt (fn () => "type_enc = " ^ type_enc) val type_enc = type_enc_of_string Strict type_enc val (sym_tab, axioms, ord_info, concealed) = generate_metis_problem ctxt type_enc lam_trans cls ths fun get_isa_thm mth Isa_Reflexive_or_Trivial = reflexive_or_trivial_of_metis ctxt type_enc sym_tab concealed mth | get_isa_thm mth Isa_Lambda_Lifted = lam_lifted_of_metis ctxt type_enc sym_tab concealed mth | get_isa_thm _ (Isa_Raw ith) = ith val axioms = axioms |> map (fn (mth, ith) => (mth, get_isa_thm mth ith |> Thm.close_derivation \<^here>)) val _ = trace_msg ctxt (K "ISABELLE CLAUSES") val _ = List.app (fn (_, ith) => trace_msg ctxt (fn () => Thm.string_of_thm ctxt ith)) axioms val _ = trace_msg ctxt (K "METIS CLAUSES") val _ = List.app (fn (mth, _) => trace_msg ctxt (fn () => Metis_Thm.toString mth)) axioms val _ = trace_msg ctxt (K "START METIS PROVE PROCESS") val ordering = if Config.get ctxt advisory_simp then kbo_advisory_simp_ordering (ord_info ()) else Metis_KnuthBendixOrder.default fun fall_back () = (verbose_warning ctxt ("Falling back on " ^ quote (metis_call (hd fallback_type_encs) lam_trans) ^ "..."); 
FOL_SOLVE unused fallback_type_encs lam_trans ctxt cls ths0) in (case filter (fn t => Thm.prop_of t aconv \<^prop>\False\) cls of false_th :: _ => [false_th RS @{thm FalseE}] | [] => (case Metis_Resolution.loop (Metis_Resolution.new (resolution_params ordering) {axioms = axioms |> map fst, conjecture = []}) of Metis_Resolution.Contradiction mth => let val _ = trace_msg ctxt (fn () => "METIS RECONSTRUCTION START: " ^ Metis_Thm.toString mth) val ctxt' = fold Variable.declare_constraints (map Thm.prop_of cls) ctxt (*add constraints arising from converting goal to clause form*) val proof = Metis_Proof.proof mth val result = fold (replay_one_inference ctxt' type_enc concealed sym_tab) proof axioms val used = map_filter (used_axioms axioms) proof val _ = trace_msg ctxt (K "METIS COMPLETED; clauses actually used:") val _ = List.app (fn th => trace_msg ctxt (fn () => Thm.string_of_thm ctxt th)) used val unused_th_cls_pairs = filter_out (have_common_thm ctxt used o #2 o #2) th_cls_pairs val _ = unused := maps (#2 o #2) unused_th_cls_pairs; val _ = if not (null unused_th_cls_pairs) then verbose_warning ctxt ("Unused theorems: " ^ commas_quote (map #1 unused_th_cls_pairs)) else (); val _ = if not (null cls) andalso not (have_common_thm ctxt used cls) then verbose_warning ctxt "The assumptions are inconsistent" else (); in (case result of (_, ith) :: _ => (trace_msg ctxt (fn () => "Success: " ^ Thm.string_of_thm ctxt ith); [discharge_skolem_premises ctxt dischargers ith]) | _ => (trace_msg ctxt (K "Metis: No result"); [])) end | Metis_Resolution.Satisfiable _ => (trace_msg ctxt (K "Metis: No first-order proof with the supplied lemmas"); raise METIS_UNPROVABLE ())) handle METIS_UNPROVABLE () => if null fallback_type_encs then [] else fall_back () | METIS_RECONSTRUCT (loc, msg) => if null fallback_type_encs then (verbose_warning ctxt ("Failed to replay Metis proof\n" ^ loc ^ ": " ^ msg); []) else fall_back ()) end fun neg_clausify ctxt combinators = single #> Meson.make_clauses_unsorted ctxt #> combinators ? map (Meson_Clausify.introduce_combinators_in_theorem ctxt) #> Meson.finish_cnf fun preskolem_tac ctxt st0 = (if exists (Meson.has_too_many_clauses ctxt) (Logic.prems_of_goal (Thm.prop_of st0) 1) then Simplifier.full_simp_tac (Meson_Clausify.ss_only @{thms not_all not_ex} ctxt) 1 THEN CNF.cnfx_rewrite_tac ctxt 1 else all_tac) st0 fun metis_tac_unused type_encs0 lam_trans ctxt ths i st0 = let val unused = Unsynchronized.ref [] val type_encs = if null type_encs0 then partial_type_encs else type_encs0 val _ = trace_msg ctxt (fn () => "Metis called with theorems\n" ^ cat_lines (map (Thm.string_of_thm ctxt) ths)) val type_encs = type_encs |> maps unalias_type_enc val combs = (lam_trans = combsN) fun tac clause = resolve_tac ctxt (FOL_SOLVE unused type_encs lam_trans ctxt clause ths) 1 val seq = Meson.MESON (preskolem_tac ctxt) (maps (neg_clausify ctxt combs)) tac ctxt i st0 in (!unused, seq) end fun metis_tac type_encs lam_trans ctxt ths i = snd o metis_tac_unused type_encs lam_trans ctxt ths i (* Whenever "X" has schematic type variables, we treat "using X by metis" as "by (metis X)" to prevent "Subgoal.FOCUS" from freezing the type variables. We don't do it for nonschematic facts "X" because this breaks a few proofs (in the rare and subtle case where a proof relied on extensionality not being applied) and brings few benefits. 
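   As a hypothetical illustration: a fact X whose statement mentions a schematic type variable ?'a is passed directly to the Metis tactic (as if "by (metis X)" had been written), whereas a monomorphic fact Y is first inserted into the goal by "Method.insert_tac"; see "metis_method" below.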
*) val has_tvar = exists_type (exists_subtype (fn TVar _ => true | _ => false)) o Thm.prop_of fun metis_method ((override_type_encs, lam_trans), ths) ctxt facts = let val (schem_facts, nonschem_facts) = List.partition has_tvar facts in HEADGOAL (Method.insert_tac ctxt nonschem_facts THEN' CHANGED_PROP o metis_tac (these override_type_encs) (the_default default_metis_lam_trans lam_trans) ctxt (schem_facts @ ths)) end val metis_lam_transs = [opaque_liftingN, liftingN, combsN] fun set_opt _ x NONE = SOME x | set_opt get x (SOME x0) = error ("Cannot specify both " ^ quote (get x0) ^ " and " ^ quote (get x)) fun consider_opt s = if s = "hide_lams" then error "\"hide_lams\" has been renamed \"opaque_lifting\"" else if member (op =) metis_lam_transs s then apsnd (set_opt I s) else apfst (set_opt hd [s]) val parse_metis_options = Scan.optional (Args.parens (Args.name -- Scan.option (\<^keyword>\,\ |-- Args.name)) >> (fn (s, s') => (NONE, NONE) |> consider_opt s |> (case s' of SOME s' => consider_opt s' | _ => I))) (NONE, NONE) val _ = Theory.setup (Method.setup \<^binding>\metis\ (Scan.lift parse_metis_options -- Attrib.thms >> (METHOD oo metis_method)) "Metis for FOL and HOL problems") end; diff --git a/src/HOL/Tools/Mirabelle/mirabelle_sledgehammer.ML b/src/HOL/Tools/Mirabelle/mirabelle_sledgehammer.ML --- a/src/HOL/Tools/Mirabelle/mirabelle_sledgehammer.ML +++ b/src/HOL/Tools/Mirabelle/mirabelle_sledgehammer.ML @@ -1,630 +1,568 @@ (* Title: HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML Author: Jasmin Blanchette, TU Munich Author: Sascha Boehme, TU Munich Author: Tobias Nipkow, TU Munich Author: Makarius - Author: Martin Desharnais, UniBw Munich + Author: Martin Desharnais, UniBw Munich, MPI-INF Saarbruecken Mirabelle action: "sledgehammer". *) structure Mirabelle_Sledgehammer: MIRABELLE_ACTION = struct (*To facilitate synching the description of Mirabelle Sledgehammer parameters (in ../lib/Tools/mirabelle) with the parameters actually used by this interface, the former extracts PARAMETER and DESCRIPTION from code below which has this pattern (provided it appears in a single line): val .*K = "PARAMETER" (*DESCRIPTION*) *) (* NOTE: Do not forget to update the Sledgehammer documentation to reflect changes here. *) val check_trivialK = "check_trivial" (*=BOOL: check if goals are "trivial"*) val e_selection_heuristicK = "e_selection_heuristic" (*=STRING: E clause selection heuristic*) -val fact_filterK = "fact_filter" (*=STRING: fact filter*) val force_sosK = "force_sos" (*=BOOL: use set-of-support (in Vampire)*) -val isar_proofsK = "isar_proofs" (*=SMART_BOOL: enable Isar proof generation*) val keepK = "keep" (*=BOOL: keep temporary files created by sledgehammer*) -val lam_transK = "lam_trans" (*=STRING: lambda translation scheme*) -val max_factsK = "max_facts" (*=NUM: max. relevant clauses to use*) -val max_mono_itersK = "max_mono_iters" (*=NUM: max. iterations of monomorphiser*) -val max_new_mono_instancesK = "max_new_mono_instances" (*=NUM: max. new monomorphic instances*) -val max_relevantK = "max_relevant" (*=NUM: max. relevant clauses to use*) -val minimizeK = "minimize" (*=BOOL: instruct sledgehammer to run its minimizer*) -val preplay_timeoutK = "preplay_timeout" (*=TIME: timeout for finding reconstructed proof*) val proof_methodK = "proof_method" (*=STRING: how to reconstruct proofs (e.g. 
using metis)*) val proverK = "prover" (*=STRING: name of the external prover to call*) -val prover_timeoutK = "prover_timeout" (*=TIME: timeout for invoked ATP (seconds of process time)*) -val sliceK = "slice" (*=BOOL: allow sledgehammer-level strategy-scheduling*) -val smt_proofsK = "smt_proofs" (*=BOOL: enable SMT proof generation*) -val strictK = "strict" (*=BOOL: run in strict mode*) val term_orderK = "term_order" (*=STRING: term order (in E)*) -val type_encK = "type_enc" (*=STRING: type encoding scheme*) -val uncurried_aliasesK = "uncurried_aliases" (*=SMART_BOOL: use fresh function names to alias curried applications*) -(*FIXME sensible to have Mirabelle-level Sledgehammer defaults?*) (*defaults used in this Mirabelle action*) -val preplay_timeout_default = "1" -val lam_trans_default = "smart" -val uncurried_aliases_default = "smart" -val fact_filter_default = "smart" -val type_enc_default = "smart" -val strict_default = "false" -val max_facts_default = "smart" -val slice_default = "true" val check_trivial_default = false val keep_default = false -(*If a key is present in args then augment a list with its pair*) -(*This is used to avoid fixing default values at the Mirabelle level, and - instead use the default values of the tool (Sledgehammer in this case).*) -fun available_parameter args key label list = - let - val value = AList.lookup (op =) args key - in if is_some value then (label, the value) :: list else list end - datatype sh_data = ShData of { calls: int, success: int, nontriv_calls: int, nontriv_success: int, lemmas: int, max_lems: int, time_isa: int, time_prover: int, time_prover_fail: int} datatype re_data = ReData of { calls: int, success: int, nontriv_calls: int, nontriv_success: int, proofs: int, time: int, timeout: int, lemmas: int * int * int, posns: (Position.T * bool) list } fun make_sh_data (calls,success,nontriv_calls,nontriv_success,lemmas,max_lems,time_isa, time_prover,time_prover_fail) = ShData{calls=calls, success=success, nontriv_calls=nontriv_calls, nontriv_success=nontriv_success, lemmas=lemmas, max_lems=max_lems, time_isa=time_isa, time_prover=time_prover, time_prover_fail=time_prover_fail} fun make_re_data (calls,success,nontriv_calls,nontriv_success,proofs,time, timeout,lemmas,posns) = ReData{calls=calls, success=success, nontriv_calls=nontriv_calls, nontriv_success=nontriv_success, proofs=proofs, time=time, timeout=timeout, lemmas=lemmas, posns=posns} val empty_sh_data = make_sh_data (0, 0, 0, 0, 0, 0, 0, 0, 0) val empty_re_data = make_re_data (0, 0, 0, 0, 0, 0, 0, (0,0,0), []) fun tuple_of_sh_data (ShData {calls, success, nontriv_calls, nontriv_success, lemmas, max_lems, time_isa, time_prover, time_prover_fail}) = (calls, success, nontriv_calls, nontriv_success, lemmas, max_lems, time_isa, time_prover, time_prover_fail) fun tuple_of_re_data (ReData {calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas, posns}) = (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas, posns) datatype data = Data of { sh: sh_data, re_u: re_data (* proof method with unminimized set of lemmas *) } type change_data = (data -> data) -> unit fun make_data (sh, re_u) = Data {sh=sh, re_u=re_u} val empty_data = make_data (empty_sh_data, empty_re_data) fun map_sh_data f (Data {sh, re_u}) = let val sh' = make_sh_data (f (tuple_of_sh_data sh)) in make_data (sh', re_u) end fun map_re_data f (Data {sh, re_u}) = let val f' = make_re_data o f o tuple_of_re_data val re_u' = f' re_u in make_data (sh, re_u') end fun inc_max (n:int) 
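(* "inc_max n (s, sos, m)" accumulates three statistics about lemma counts: the running sum "s", the sum of squares "sos" (reported below as "SOS of successful proof method lemmas"), and the maximum "m". A worked example: applying "inc_max 3" to (10, 50, 4) yields (13, 59, 4). *)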
(s,sos,m) = (s+n, sos + n*n, Int.max(m,n)); val inc_sh_calls = map_sh_data (fn (calls, success, nontriv_calls, nontriv_success, lemmas,max_lems, time_isa, time_prover, time_prover_fail) => (calls + 1, success, nontriv_calls, nontriv_success, lemmas, max_lems, time_isa, time_prover, time_prover_fail)) val inc_sh_success = map_sh_data (fn (calls, success, nontriv_calls, nontriv_success, lemmas,max_lems, time_isa, time_prover, time_prover_fail) => (calls, success + 1, nontriv_calls, nontriv_success, lemmas,max_lems, time_isa, time_prover, time_prover_fail)) val inc_sh_nontriv_calls = map_sh_data (fn (calls, success, nontriv_calls, nontriv_success, lemmas,max_lems, time_isa, time_prover, time_prover_fail) => (calls, success, nontriv_calls + 1, nontriv_success, lemmas, max_lems, time_isa, time_prover, time_prover_fail)) val inc_sh_nontriv_success = map_sh_data (fn (calls, success, nontriv_calls, nontriv_success, lemmas,max_lems, time_isa, time_prover, time_prover_fail) => (calls, success, nontriv_calls, nontriv_success + 1, lemmas,max_lems, time_isa, time_prover, time_prover_fail)) fun inc_sh_lemmas n = map_sh_data (fn (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail) => (calls,success,nontriv_calls, nontriv_success, lemmas+n,max_lems,time_isa,time_prover,time_prover_fail)) fun inc_sh_max_lems n = map_sh_data (fn (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail) => (calls,success,nontriv_calls, nontriv_success, lemmas,Int.max(max_lems,n),time_isa,time_prover,time_prover_fail)) fun inc_sh_time_isa t = map_sh_data (fn (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail) => (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa + t,time_prover,time_prover_fail)) fun inc_sh_time_prover t = map_sh_data (fn (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail) => (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover + t,time_prover_fail)) fun inc_sh_time_prover_fail t = map_sh_data (fn (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail) => (calls,success,nontriv_calls, nontriv_success, lemmas,max_lems,time_isa,time_prover,time_prover_fail + t)) val inc_proof_method_calls = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls + 1, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas,posns)) val inc_proof_method_success = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success + 1, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas,posns)) val inc_proof_method_nontriv_calls = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls + 1, nontriv_success, proofs, time, timeout, lemmas,posns)) val inc_proof_method_nontriv_success = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls, nontriv_success + 1, proofs, time, timeout, lemmas,posns)) val inc_proof_method_proofs = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls, nontriv_success, proofs + 1, time, timeout, lemmas,posns)) fun inc_proof_method_time t = map_re_data (fn 
(calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls, nontriv_success, proofs, time + t, timeout, lemmas,posns)) val inc_proof_method_timeout = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout + 1, lemmas,posns)) fun inc_proof_method_lemmas n = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, inc_max n lemmas, posns)) fun inc_proof_method_posns pos = map_re_data (fn (calls,success,nontriv_calls, nontriv_success, proofs,time,timeout,lemmas,posns) => (calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas, pos::posns)) val str0 = string_of_int o the_default 0 local val str = string_of_int val str3 = Real.fmt (StringCvt.FIX (SOME 3)) fun percentage a b = string_of_int (a * 100 div b) fun ms t = Real.fromInt t / 1000.0 fun avg_time t n = if n > 0 then (Real.fromInt t / 1000.0) / Real.fromInt n else 0.0 fun log_sh_data (ShData {calls, success, nontriv_calls, nontriv_success, lemmas, max_lems, time_isa, time_prover, time_prover_fail}) = "\nTotal number of sledgehammer calls: " ^ str calls ^ "\nNumber of successful sledgehammer calls: " ^ str success ^ "\nNumber of sledgehammer lemmas: " ^ str lemmas ^ "\nMax number of sledgehammer lemmas: " ^ str max_lems ^ "\nSuccess rate: " ^ percentage success calls ^ "%" ^ "\nTotal number of nontrivial sledgehammer calls: " ^ str nontriv_calls ^ "\nNumber of successful nontrivial sledgehammer calls: " ^ str nontriv_success ^ "\nTotal time for sledgehammer calls (Isabelle): " ^ str3 (ms time_isa) ^ "\nTotal time for successful sledgehammer calls (ATP): " ^ str3 (ms time_prover) ^ "\nTotal time for failed sledgehammer calls (ATP): " ^ str3 (ms time_prover_fail) ^ "\nAverage time for sledgehammer calls (Isabelle): " ^ str3 (avg_time time_isa calls) ^ "\nAverage time for successful sledgehammer calls (ATP): " ^ str3 (avg_time time_prover success) ^ "\nAverage time for failed sledgehammer calls (ATP): " ^ str3 (avg_time time_prover_fail (calls - success)) fun log_re_data sh_calls (ReData {calls, success, nontriv_calls, nontriv_success, proofs, time, timeout, lemmas = (lemmas, lems_sos, lems_max), posns}) = let val proved = posns |> map (fn (pos, triv) => str0 (Position.line_of pos) ^ ":" ^ str0 (Position.offset_of pos) ^ (if triv then "[T]" else "")) in "\nTotal number of proof method calls: " ^ str calls ^ "\nNumber of successful proof method calls: " ^ str success ^ " (proof: " ^ str proofs ^ ")" ^ "\nNumber of proof method timeouts: " ^ str timeout ^ "\nSuccess rate: " ^ percentage success sh_calls ^ "%" ^ "\nTotal number of nontrivial proof method calls: " ^ str nontriv_calls ^ "\nNumber of successful nontrivial proof method calls: " ^ str nontriv_success ^ " (proof: " ^ str proofs ^ ")" ^ "\nNumber of successful proof method lemmas: " ^ str lemmas ^ "\nSOS of successful proof method lemmas: " ^ str lems_sos ^ "\nMax number of successful proof method lemmas: " ^ str lems_max ^ "\nTotal time for successful proof method calls: " ^ str3 (ms time) ^ "\nAverage time for successful proof method calls: " ^ str3 (avg_time time success) ^ "\nProved: " ^ space_implode " " proved end in fun log_data (Data {sh, re_u}) = let val ShData {calls=sh_calls, ...} = sh val ReData {calls=re_calls, ...} = re_u in if sh_calls > 0 then let val text1 = 
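(* The time fields are stored in milliseconds; "ms" and "avg_time" convert to seconds for the log. For example, avg_time 5000 4 = 1.25 (seconds per successful call), and percentage 3 4 = "75" by integer division. *)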
log_sh_data sh in if re_calls > 0 then text1 ^ "\n" ^ log_re_data sh_calls re_u else text1 end else "" end end fun get_prover_name thy args = let fun default_prover_name () = hd (#provers (Sledgehammer_Commands.default_params thy [])) handle List.Empty => error "No ATP available" in (case AList.lookup (op =) args proverK of SOME name => name | NONE => default_prover_name ()) end fun get_prover ctxt name params goal = let val learn = Sledgehammer_MaSh.mash_learn_proof ctxt params (Thm.prop_of goal) in Sledgehammer_Prover_Minimize.get_minimizing_prover ctxt Sledgehammer_Prover.Normal learn name end type stature = ATP_Problem_Generate.stature fun is_good_line s = (String.isSubstring " ms)" s orelse String.isSubstring " s)" s) andalso not (String.isSubstring "(> " s) andalso not (String.isSubstring ", > " s) andalso not (String.isSubstring "may fail" s) (* Fragile hack *) fun proof_method_from_msg args msg = (case AList.lookup (op =) args proof_methodK of SOME name => if name = "smart" then if exists is_good_line (split_lines msg) then "none" else "fail" else name | NONE => if exists is_good_line (split_lines msg) then "none" (* trust the preplayed proof *) else if String.isSubstring "metis (" msg then msg |> Substring.full |> Substring.position "metis (" |> snd |> Substring.position ")" |> fst |> Substring.string |> suffix ")" else if String.isSubstring "metis" msg then "metis" else "smt") local datatype sh_result = SH_OK of int * int * (string * stature) list | SH_FAIL of int * int | SH_ERROR -fun run_sh prover_name fact_filter type_enc strict max_facts slice - lam_trans uncurried_aliases e_selection_heuristic term_order force_sos - hard_timeout timeout preplay_timeout isar_proofsLST smt_proofsLST - minimizeLST max_new_mono_instancesLST max_mono_itersLST dir pos st = +fun run_sh (params as {max_facts, minimize, preplay_timeout, ...}) prover_name e_selection_heuristic + term_order force_sos hard_timeout dir pos st = let val thy = Proof.theory_of st val {context = ctxt, facts = chained_ths, goal} = Proof.goal st val i = 1 fun set_file_name (SOME dir) = - Config.put Sledgehammer_Prover_ATP.atp_dest_dir dir - #> Config.put Sledgehammer_Prover_ATP.atp_problem_prefix - ("prob_" ^ + let + val filename = "prob_" ^ StringCvt.padLeft #"0" 5 (str0 (Position.line_of pos)) ^ "_" ^ - StringCvt.padLeft #"0" 6 (str0 (Position.offset_of pos)) ^ "__") - #> Config.put SMT_Config.debug_files - (dir ^ "/" ^ Name.desymbolize (SOME false) (ATP_Util.timestamp ()) ^ "_" - ^ serial_string ()) + StringCvt.padLeft #"0" 6 (str0 (Position.offset_of pos)) + in + Config.put Sledgehammer_Prover_ATP.atp_dest_dir dir + #> Config.put Sledgehammer_Prover_ATP.atp_problem_prefix (filename ^ "__") + #> Config.put SMT_Config.debug_files (dir ^ "/" ^ filename ^ "__" ^ serial_string ()) + end | set_file_name NONE = I val st' = st |> Proof.map_context (set_file_name dir #> (Option.map (Config.put Sledgehammer_ATP_Systems.e_selection_heuristic) e_selection_heuristic |> the_default I) #> (Option.map (Config.put Sledgehammer_ATP_Systems.term_order) term_order |> the_default I) #> (Option.map (Config.put Sledgehammer_ATP_Systems.force_sos) force_sos |> the_default I)) - val params as {max_facts, minimize, preplay_timeout, ...} = - Sledgehammer_Commands.default_params thy - ([(* ("verbose", "true"), *) - ("fact_filter", fact_filter), - ("type_enc", type_enc), - ("strict", strict), - ("lam_trans", lam_trans |> the_default lam_trans_default), - ("uncurried_aliases", uncurried_aliases |> the_default uncurried_aliases_default), - ("max_facts", 
max_facts), - ("slice", slice), - ("timeout", string_of_int timeout), - ("preplay_timeout", preplay_timeout)] - |> isar_proofsLST - |> smt_proofsLST - |> minimizeLST (*don't confuse the two minimization flags*) - |> max_new_mono_instancesLST - |> max_mono_itersLST) val default_max_facts = Sledgehammer_Prover_Minimize.default_max_facts_of_prover ctxt prover_name val (_, hyp_ts, concl_t) = ATP_Util.strip_subgoal goal i ctxt val time_limit = (case hard_timeout of NONE => I - | SOME secs => Timeout.apply (Time.fromSeconds secs)) + | SOME t => Timeout.apply t) fun failed failure = ({outcome = SOME failure, used_facts = [], used_from = [], preferred_methss = (Sledgehammer_Proof_Methods.Auto_Method, []), run_time = Time.zeroTime, message = K ""}, ~1) val ({outcome, used_facts, preferred_methss, run_time, message, ...} : Sledgehammer_Prover.prover_result, time_isa) = time_limit (Mirabelle.cpu_time (fn () => let val ho_atp = Sledgehammer_Prover_ATP.is_ho_atp ctxt prover_name val keywords = Thy_Header.get_keywords' ctxt val css_table = Sledgehammer_Fact.clasimpset_rule_table_of ctxt val facts = Sledgehammer_Fact.nearly_all_facts ctxt ho_atp Sledgehammer_Fact.no_fact_override keywords css_table chained_ths hyp_ts concl_t val factss = facts |> Sledgehammer_MaSh.relevant_facts ctxt params prover_name (the_default default_max_facts max_facts) Sledgehammer_Fact.no_fact_override hyp_ts concl_t |> tap (fn factss => "Line " ^ str0 (Position.line_of pos) ^ ": " ^ Sledgehammer.string_of_factss factss |> writeln) val prover = get_prover ctxt prover_name params goal val problem = {comment = "", state = st', goal = goal, subgoal = i, subgoal_count = Sledgehammer_Util.subgoal_count st, factss = factss, found_proof = I} in prover params problem end)) () handle Timeout.TIMEOUT _ => failed ATP_Proof.TimedOut | Fail "inappropriate" => failed ATP_Proof.Inappropriate val time_prover = run_time |> Time.toMilliseconds val msg = message (fn () => Sledgehammer.play_one_line_proof minimize preplay_timeout used_facts st' i preferred_methss) in (case outcome of NONE => (msg, SH_OK (time_isa, time_prover, used_facts)) | SOME _ => (msg, SH_FAIL (time_isa, time_prover))) end handle ERROR msg => ("error: " ^ msg, SH_ERROR) in -fun run_sledgehammer change_data thy_index trivial output_dir args proof_method named_thms pos st = +fun run_sledgehammer change_data (params as {provers, timeout, ...}) output_dir + e_selection_heuristic term_order force_sos keep proof_method_from_msg thy_index trivial + proof_method named_thms pos st = let val thy = Proof.theory_of st val thy_name = Context.theory_name thy val triv_str = if trivial then "[T] " else "" val _ = change_data inc_sh_calls val _ = if trivial then () else change_data inc_sh_nontriv_calls - val prover_name = get_prover_name thy args - val fact_filter = AList.lookup (op =) args fact_filterK |> the_default fact_filter_default - val type_enc = AList.lookup (op =) args type_encK |> the_default type_enc_default - val strict = AList.lookup (op =) args strictK |> the_default strict_default - val max_facts = - (case AList.lookup (op =) args max_factsK of - SOME max => max - | NONE => - (case AList.lookup (op =) args max_relevantK of - SOME max => max - | NONE => max_facts_default)) - val slice = AList.lookup (op =) args sliceK |> the_default slice_default - val lam_trans = AList.lookup (op =) args lam_transK - val uncurried_aliases = AList.lookup (op =) args uncurried_aliasesK - val e_selection_heuristic = AList.lookup (op =) args e_selection_heuristicK - val term_order = AList.lookup 
(op =) args term_orderK - val force_sos = AList.lookup (op =) args force_sosK - |> Option.map (curry (op <>) "false") val keep_dir = - if Mirabelle.get_bool_argument args (keepK, keep_default) then + if keep then let val subdir = StringCvt.padLeft #"0" 4 (string_of_int thy_index) ^ "_" ^ thy_name in Path.append output_dir (Path.basic subdir) |> Isabelle_System.make_directory |> Path.implode |> SOME end else NONE - val timeout = Mirabelle.get_int_argument args (prover_timeoutK, 30) (* always use a hard timeout, but give some slack so that the automatic minimizer has a chance to do its magic *) - val preplay_timeout = AList.lookup (op =) args preplay_timeoutK - |> the_default preplay_timeout_default - val isar_proofsLST = available_parameter args isar_proofsK "isar_proofs" - val smt_proofsLST = available_parameter args smt_proofsK "smt_proofs" - val minimizeLST = available_parameter args minimizeK "minimize" - val max_new_mono_instancesLST = - available_parameter args max_new_mono_instancesK max_new_mono_instancesK - val max_mono_itersLST = available_parameter args max_mono_itersK max_mono_itersK - val hard_timeout = SOME (4 * timeout) + val hard_timeout = SOME (Time.scale 4.0 timeout) + val prover_name = hd provers val (msg, result) = - run_sh prover_name fact_filter type_enc strict max_facts slice lam_trans - uncurried_aliases e_selection_heuristic term_order force_sos - hard_timeout timeout preplay_timeout isar_proofsLST smt_proofsLST - minimizeLST max_new_mono_instancesLST max_mono_itersLST keep_dir pos st + run_sh params prover_name e_selection_heuristic term_order force_sos hard_timeout keep_dir pos + st in (case result of SH_OK (time_isa, time_prover, names) => let fun get_thms (name, stature) = try (Sledgehammer_Util.thms_of_name (Proof.context_of st)) name |> Option.map (pair (name, stature)) in change_data inc_sh_success; if trivial then () else change_data inc_sh_nontriv_success; change_data (inc_sh_lemmas (length names)); change_data (inc_sh_max_lems (length names)); change_data (inc_sh_time_isa time_isa); change_data (inc_sh_time_prover time_prover); - proof_method := proof_method_from_msg args msg; + proof_method := proof_method_from_msg msg; named_thms := SOME (map_filter get_thms names); triv_str ^ "succeeded (" ^ string_of_int time_isa ^ "+" ^ string_of_int time_prover ^ ") [" ^ prover_name ^ "]:\n" ^ msg end | SH_FAIL (time_isa, time_prover) => let val _ = change_data (inc_sh_time_isa time_isa) val _ = change_data (inc_sh_time_prover_fail time_prover) in triv_str ^ "failed: " ^ msg end | SH_ERROR => "failed: " ^ msg) end end fun override_params prover type_enc timeout = [("provers", prover), ("max_facts", "0"), ("type_enc", type_enc), ("strict", "true"), ("slice", "false"), ("timeout", timeout |> Time.toSeconds |> string_of_int)] fun run_proof_method change_data trivial full name meth named_thms timeout pos st = let fun do_method named_thms ctxt = let val ref_of_str = (* FIXME proper wrapper for parser combinators *) suffix ";" #> Token.explode (Thy_Header.get_keywords' ctxt) Position.none #> Parse.thm #> fst val thms = named_thms |> maps snd val facts = named_thms |> map (ref_of_str o fst o fst) val fact_override = {add = facts, del = [], only = true} fun my_timeout time_slice = timeout |> Time.toReal |> curry (op *) time_slice |> Time.fromReal fun sledge_tac time_slice prover type_enc = Sledgehammer_Tactics.sledgehammer_as_oracle_tac ctxt (override_params prover type_enc (my_timeout time_slice)) fact_override [] in if !meth = "sledgehammer_tac" then sledge_tac 0.2 
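(* Each "sledge_tac" invocation below receives a fraction of the per-command timeout via "my_timeout"; e.g. with a 30 s timeout, a 0.2 slice gives the prover 6 s before ORELSE' falls through to the next tactic: *)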
ATP_Proof.vampireN "mono_native" ORELSE' sledge_tac 0.2 ATP_Proof.eN "poly_guards??" ORELSE' sledge_tac 0.2 ATP_Proof.spassN "mono_native" ORELSE' sledge_tac 0.2 ATP_Proof.z3_tptpN "poly_tags??" ORELSE' SMT_Solver.smt_tac ctxt thms else if !meth = "smt" then SMT_Solver.smt_tac ctxt thms else if full then Metis_Tactic.metis_tac [ATP_Proof_Reconstruct.full_typesN] ATP_Proof_Reconstruct.default_metis_lam_trans ctxt thms else if String.isPrefix "metis (" (!meth) then let val (type_encs, lam_trans) = !meth |> Token.explode (Thy_Header.get_keywords' ctxt) Position.start |> filter Token.is_proper |> tl |> Metis_Tactic.parse_metis_options |> fst |>> the_default [ATP_Proof_Reconstruct.partial_typesN] ||> the_default ATP_Proof_Reconstruct.default_metis_lam_trans in Metis_Tactic.metis_tac type_encs lam_trans ctxt thms end else if !meth = "metis" then Metis_Tactic.metis_tac [] ATP_Proof_Reconstruct.default_metis_lam_trans ctxt thms else if !meth = "none" then K all_tac else if !meth = "fail" then K no_tac else (warning ("Unknown method " ^ quote (!meth)); K no_tac) end fun apply_method named_thms = Mirabelle.can_apply timeout (do_method named_thms) st fun with_time (false, t) = "failed (" ^ string_of_int t ^ ")" | with_time (true, t) = (change_data inc_proof_method_success; if trivial then () else change_data inc_proof_method_nontriv_success; change_data (inc_proof_method_lemmas (length named_thms)); change_data (inc_proof_method_time t); change_data (inc_proof_method_posns (pos, trivial)); if name = "proof" then change_data inc_proof_method_proofs else (); "succeeded (" ^ string_of_int t ^ ")") fun timed_method named_thms = with_time (Mirabelle.cpu_time apply_method named_thms) handle Timeout.TIMEOUT _ => (change_data inc_proof_method_timeout; "timeout") | ERROR msg => ("error: " ^ msg) val _ = change_data inc_proof_method_calls val _ = if trivial then () else change_data inc_proof_method_nontriv_calls in timed_method named_thms end val try_timeout = seconds 5.0 fun make_action ({arguments, timeout, output_dir, ...} : Mirabelle.action_context) = let + (* Parse Mirabelle-specific parameters *) val check_trivial = Mirabelle.get_bool_argument arguments (check_trivialK, check_trivial_default) + val keep = Mirabelle.get_bool_argument arguments (keepK, keep_default) + val e_selection_heuristic = AList.lookup (op =) arguments e_selection_heuristicK + val term_order = AList.lookup (op =) arguments term_orderK + val force_sos = AList.lookup (op =) arguments force_sosK + |> Option.map (curry (op <>) "false") + val proof_method_from_msg = proof_method_from_msg arguments + + (* Parse Sledgehammer parameters *) + val params = Sledgehammer_Commands.default_params \<^theory> arguments + |> (fn (params as {provers, ...}) => + (case provers of + prover :: _ => Sledgehammer_Prover.set_params_provers params [prover] + | _ => error "sledgehammer action requires one prover")) val data = Synchronized.var "Mirabelle_Sledgehammer.data" empty_data val change_data = Synchronized.change data fun run_action ({theory_index, name, pos, pre, ...} : Mirabelle.command) = let val goal = Thm.major_prem_of (#goal (Proof.goal pre)) in if can Logic.dest_conjunction goal orelse can Logic.dest_equals goal then "" else let val meth = Unsynchronized.ref "" val named_thms = Unsynchronized.ref (NONE : ((string * stature) * thm list) list option) val trivial = check_trivial andalso Try0.try0 (SOME try_timeout) ([], [], [], []) pre handle Timeout.TIMEOUT _ => false val log1 = - run_sledgehammer change_data theory_index trivial output_dir 
arguments meth named_thms - pos pre + run_sledgehammer change_data params output_dir e_selection_heuristic term_order + force_sos keep proof_method_from_msg theory_index trivial meth named_thms pos pre val log2 = (case !named_thms of SOME thms => !meth ^ " (sledgehammer): " ^ run_proof_method change_data trivial false name meth thms timeout pos pre | NONE => "") in log1 ^ "\n" ^ log2 end end fun finalize () = log_data (Synchronized.value data) in {run_action = run_action, finalize = finalize} end val () = Mirabelle.register_action "sledgehammer" make_action -end +end \ No newline at end of file diff --git a/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_atp_systems.ML @@ -1,720 +1,726 @@ (* Title: HOL/Tools/ATP/atp_systems.ML Author: Fabian Immler, TU Muenchen Author: Jasmin Blanchette, TU Muenchen Setup for supported ATPs. *) signature SLEDGEHAMMER_ATP_SYSTEMS = sig type term_order = ATP_Problem.term_order type atp_format = ATP_Problem.atp_format type atp_formula_role = ATP_Problem.atp_formula_role type atp_failure = ATP_Proof.atp_failure type slice_spec = (int * string) * atp_format * string * string * bool type atp_config = {exec : string list * string list, arguments : Proof.context -> bool -> string -> Time.time -> Path.T -> term_order * (unit -> (string * int) list) * (unit -> (string * real) list) -> string list, proof_delims : (string * string) list, known_failures : (atp_failure * string) list, prem_role : atp_formula_role, best_slices : Proof.context -> (real * (slice_spec * string)) list, best_max_mono_iters : int, best_max_new_mono_instances : int} val default_max_mono_iters : int val default_max_new_mono_instances : int val force_sos : bool Config.T val term_order : string Config.T val e_smartN : string val e_autoN : string val e_fun_weightN : string val e_sym_offset_weightN : string val e_selection_heuristic : string Config.T val e_default_fun_weight : real Config.T val e_fun_weight_base : real Config.T val e_fun_weight_span : real Config.T val e_default_sym_offs_weight : real Config.T val e_sym_offs_weight_base : real Config.T val e_sym_offs_weight_span : real Config.T val spass_H1SOS : string val spass_H2 : string val spass_H2LR0LT0 : string val spass_H2NuVS0 : string val spass_H2NuVS0Red2 : string val spass_H2SOS : string val isabelle_scala_function: string list * string list val remote_atp : string -> string -> string list -> (string * string) list -> (atp_failure * string) list -> atp_formula_role -> (Proof.context -> slice_spec * string) -> string * (unit -> atp_config) val add_atp : string * (unit -> atp_config) -> theory -> theory val get_atp : theory -> string -> (unit -> atp_config) val supported_atps : theory -> string list val is_atp_installed : theory -> string -> bool val refresh_systems_on_tptp : unit -> unit val effective_term_order : Proof.context -> string -> term_order end; structure Sledgehammer_ATP_Systems : SLEDGEHAMMER_ATP_SYSTEMS = struct open ATP_Problem open ATP_Proof open ATP_Problem_Generate (* ATP configuration *) +val TF0 = TFF (Monomorphic, Without_FOOL) +val TF1 = TFF (Polymorphic, Without_FOOL) +val TX0 = TFF (Monomorphic, With_FOOL {with_ite = true, with_let = true}) +val TX1 = TFF (Polymorphic, With_FOOL {with_ite = true, with_let = true}) +val TH0 = THF (Monomorphic, {with_ite = true, with_let = true}, THF_With_Choice) +val TH1 = THF (Polymorphic, {with_ite = true, 
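(* These abbreviations follow the TPTP sublanguage names: TF0/TF1 are the monomorphic/polymorphic typed first-order forms, TX0/TX1 additionally admit the FOOL extensions ($ite and $let), and TH0/TH1 are the corresponding higher-order forms, here with choice: *)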
with_let = true}, THF_With_Choice) + val default_max_mono_iters = 3 (* FUDGE *) val default_max_new_mono_instances = 100 (* FUDGE *) type slice_spec = (int * string) * atp_format * string * string * bool type atp_config = {exec : string list * string list, arguments : Proof.context -> bool -> string -> Time.time -> Path.T -> term_order * (unit -> (string * int) list) * (unit -> (string * real) list) -> string list, proof_delims : (string * string) list, known_failures : (atp_failure * string) list, prem_role : atp_formula_role, best_slices : Proof.context -> (real * (slice_spec * string)) list, best_max_mono_iters : int, best_max_new_mono_instances : int} (* "best_slices" must be found empirically, taking a holistic approach since the ATPs are run in parallel. Each slice has the format (time_frac, ((max_facts, fact_filter), format, type_enc, lam_trans, uncurried_aliases), extra) where time_frac = fraction of the time available given to the slice (which should add up to 1.0) extra = extra information to the prover (e.g., SOS or no SOS). The last slice should be the most "normal" one, because it will get all the time available if the other slices fail early and also because it is used if slicing is disabled (e.g., by the minimizer). *) val mepoN = "mepo" val mashN = "mash" val meshN = "mesh" val tstp_proof_delims = [("% SZS output start CNFRefutation", "% SZS output end CNFRefutation"), ("% SZS output start Refutation", "% SZS output end Refutation"), ("% SZS output start Proof", "% SZS output end Proof")] fun known_szs_failures wrap = [(Unprovable, wrap "CounterSatisfiable"), (Unprovable, wrap "Satisfiable"), (GaveUp, wrap "GaveUp"), (GaveUp, wrap "Unknown"), (GaveUp, wrap "Incomplete"), (ProofMissing, wrap "Theorem"), (ProofMissing, wrap "Unsatisfiable"), (TimedOut, wrap "Timeout"), (Inappropriate, wrap "Inappropriate"), (OutOfResources, wrap "ResourceOut"), (OutOfResources, wrap "MemoryOut"), (Interrupted, wrap "Forced"), (Interrupted, wrap "User")] val known_szs_status_failures = known_szs_failures (prefix "SZS status ") val known_says_failures = known_szs_failures (prefix " says ") structure Data = Theory_Data ( type T = ((unit -> atp_config) * stamp) Symtab.table val empty = Symtab.empty fun merge data : T = Symtab.merge (eq_snd (op =)) data handle Symtab.DUP name => error ("Duplicate ATP: " ^ quote name) ) fun to_secs min time = Int.max (min, (Time.toMilliseconds time + 999) div 1000) fun normalize_weights xs = let val total_weight = real (fold (curry op + o fst) xs 0) in map (apfst (fn weight => real weight / total_weight)) xs end val sosN = "sos" val no_sosN = "no_sos" val force_sos = Attrib.setup_config_bool \<^binding>\atp_force_sos\ (K false) val smartN = "smart" (* val kboN = "kbo" *) val lpoN = "lpo" val xweightsN = "_weights" val xprecN = "_prec" val xsimpN = "_simp" (* SPASS-specific *) (* Possible values for "atp_term_order": "smart", "(kbo|lpo)(_weights)?(_prec|_simp)?"
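   Concretely, this admits values such as "kbo", "lpo", "kbo_prec", "lpo_weights" and "kbo_weights_simp".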
*) val term_order = Attrib.setup_config_string \<^binding>\atp_term_order\ (K smartN) - (* agsyHOL *) val agsyhol_config : atp_config = {exec = (["AGSYHOL_HOME"], ["agsyHOL"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => fn _ => ["--proof --time-out " ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = known_szs_status_failures, prem_role = Hypothesis, best_slices = (* FUDGE *) - K [(1.0, (((60, ""), THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], + K [(1.0, (((60, ""), THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), best_max_new_mono_instances = default_max_new_mono_instances} val agsyhol = (agsyholN, fn () => agsyhol_config) (* Alt-Ergo *) val alt_ergo_config : atp_config = {exec = (["WHY3_HOME"], ["why3"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => fn _ => ["--format tptp --prover 'Alt-Ergo,0.95.2,' --timelimit " ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem], proof_delims = [], known_failures = [(ProofMissing, ": Valid"), (TimedOut, ": Timeout"), (GaveUp, ": Unknown")], prem_role = Hypothesis, best_slices = fn _ => (* FUDGE *) - [(1.0, (((100, ""), TFF (Without_FOOL, Polymorphic), "poly_native", liftingN, false), ""))], + [(1.0, (((100, ""), TF1, "poly_native", liftingN, false), ""))], best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} val alt_ergo = (alt_ergoN, fn () => alt_ergo_config) (* E *) val e_smartN = "smart" val e_autoN = "auto" val e_fun_weightN = "fun_weight" val e_sym_offset_weightN = "sym_offset_weight" val e_selection_heuristic = Attrib.setup_config_string \<^binding>\atp_e_selection_heuristic\ (K e_smartN) (* FUDGE *) val e_default_fun_weight = Attrib.setup_config_real \<^binding>\atp_e_default_fun_weight\ (K 20.0) val e_fun_weight_base = Attrib.setup_config_real \<^binding>\atp_e_fun_weight_base\ (K 0.0) val e_fun_weight_span = Attrib.setup_config_real \<^binding>\atp_e_fun_weight_span\ (K 40.0) val e_default_sym_offs_weight = Attrib.setup_config_real \<^binding>\atp_e_default_sym_offs_weight\ (K 1.0) val e_sym_offs_weight_base = Attrib.setup_config_real \<^binding>\atp_e_sym_offs_weight_base\ (K ~20.0) val e_sym_offs_weight_span = Attrib.setup_config_real \<^binding>\atp_e_sym_offs_weight_span\ (K 60.0) fun e_selection_heuristic_case heuristic fw sow = if heuristic = e_fun_weightN then fw else if heuristic = e_sym_offset_weightN then sow else raise Fail ("unexpected " ^ quote heuristic) fun scaled_e_selection_weight ctxt heuristic w = w * Config.get ctxt (e_selection_heuristic_case heuristic e_fun_weight_span e_sym_offs_weight_span) + Config.get ctxt (e_selection_heuristic_case heuristic e_fun_weight_base e_sym_offs_weight_base) |> Real.ceil |> signed_string_of_int fun e_selection_weight_arguments ctxt heuristic sel_weights = if heuristic = e_fun_weightN orelse heuristic = e_sym_offset_weightN then (* supplied by Stephan Schulz *) "--split-clauses=4 --split-reuse-defs --simul-paramod --forward-context-sr \ \--destructive-er-aggressive --destructive-er --presat-simplify \ \--prefer-initial-clauses -winvfreqrank -c1 -Ginvfreqconjmax -F1 \ \--delete-bad-limit=150000000 -WSelectMaxLComplexAvoidPosPred -H'(4*" ^ e_selection_heuristic_case heuristic "FunWeight" "SymOffsetWeight" ^ "(SimulateSOS," ^ 
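(* "scaled_e_selection_weight" maps a relevance weight w (roughly in [0, 1]) to an integer E weight via ceil (w * span + base): for "fun_weight" (base 0.0, span 40.0), w = 0.5 yields 20; for "sym_offset_weight" (base ~20.0, span 60.0), w = 0.5 yields 10. *)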
(e_selection_heuristic_case heuristic e_default_fun_weight e_default_sym_offs_weight |> Config.get ctxt |> Real.ceil |> signed_string_of_int) ^ ",20,1.5,1.5,1" ^ (sel_weights () |> map (fn (s, w) => "," ^ s ^ ":" ^ scaled_e_selection_weight ctxt heuristic w) |> implode) ^ "),3*ConjectureGeneralSymbolWeight(PreferNonGoals,200,100,200,50,50,1,100,\ \1.5,1.5,1),1*Clauseweight(PreferProcessed,1,1,1),1*\ \FIFOWeight(PreferProcessed))' " else "-xAuto " val e_ord_weights = map (fn (s, w) => s ^ ":" ^ string_of_int w) #> space_implode "," fun e_ord_precedence [_] = "" | e_ord_precedence info = info |> map fst |> space_implode "<" fun e_term_order_info_arguments false false _ = "" | e_term_order_info_arguments gen_weights gen_prec ord_info = let val ord_info = ord_info () in (if gen_weights then "--order-weights='" ^ e_ord_weights ord_info ^ "' " else "") ^ (if gen_prec then "--precedence='" ^ e_ord_precedence ord_info ^ "' " else "") end val e_config : atp_config = {exec = (["E_HOME"], ["eprover-ho", "eprover"]), arguments = fn ctxt => fn _ => fn heuristic => fn timeout => fn problem => fn ({is_lpo, gen_weights, gen_prec, ...}, ord_info, sel_weights) => ["--auto-schedule --tstp-in --tstp-out --silent " ^ e_selection_weight_arguments ctxt heuristic sel_weights ^ e_term_order_info_arguments gen_weights gen_prec ord_info ^ "--term-ordering=" ^ (if is_lpo then "LPO4" else "KBO6") ^ " " ^ "--cpu-limit=" ^ string_of_int (to_secs 2 timeout) ^ " --proof-object=1 " ^ File.bash_path problem], proof_delims = [("# SZS output start CNFRefutation", "# SZS output end CNFRefutation")] @ tstp_proof_delims, known_failures = [(TimedOut, "Failure: Resource limit exceeded (time)"), (TimedOut, "time limit exceeded")] @ known_szs_status_failures, prem_role = Conjecture, best_slices = fn ctxt => let val heuristic = Config.get ctxt e_selection_heuristic val (format, enc, main_lam_trans) = if string_ord (getenv "E_VERSION", "2.7") <> LESS then - (THF (With_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher_fool", keep_lamsN) + (THF (Monomorphic, {with_ite = true, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN) else if string_ord (getenv "E_VERSION", "2.6") <> LESS then - (THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN) + (THF (Monomorphic, {with_ite = true, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN) else - (THF (Without_FOOL, Monomorphic, THF_Lambda_Free), "mono_native_higher", combsN) + (THF (Monomorphic, {with_ite = false, with_let = false}, THF_Lambda_Free), "mono_native_higher", combsN) in (* FUDGE *) if heuristic = e_smartN then [(0.15, (((128, meshN), format, enc, main_lam_trans, false), e_fun_weightN)), (0.15, (((128, mashN), format, enc, main_lam_trans, false), e_sym_offset_weightN)), (0.15, (((91, mepoN), format, enc, main_lam_trans, false), e_autoN)), (0.15, (((1000, meshN), format, "poly_guards??", main_lam_trans, false), e_sym_offset_weightN)), (0.15, (((256, mepoN), format, enc, liftingN, false), e_fun_weightN)), (0.25, (((64, mashN), format, enc, combsN, false), e_fun_weightN))] else [(1.0, (((500, ""), format, enc, combsN, false), heuristic))] end, best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} val e = (eN, fn () => e_config) (* iProver *) val iprover_config : atp_config = {exec = (["IPROVER_HOME"], ["iproveropt", "iprover"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => fn _ => ["--clausifier \"$VAMPIRE_HOME\"/vampire " ^ 
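(* iProver delegates clausification to Vampire ("--mode clausify") and runs its instantiation-based search on the resulting CNF: *)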
"--clausifier_options \"--mode clausify\" " ^ "--time_out_real " ^ string_of_real (Time.toReal timeout) ^ " " ^ File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = [(ProofIncomplete, "% SZS output start CNFRefutation")] @ known_szs_status_failures, prem_role = Hypothesis, best_slices = (* FUDGE *) K [(1.0, (((150, ""), FOF, "mono_guards??", liftingN, false), ""))], best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} val iprover = (iproverN, fn () => iprover_config) (* LEO-II *) val leo2_config : atp_config = {exec = (["LEO2_HOME"], ["leo.opt", "leo"]), arguments = fn _ => fn full_proofs => fn _ => fn timeout => fn problem => fn _ => ["--foatp e --atp e=\"$E_HOME\"/eprover \ \--atp epclextract=\"$E_HOME\"/epclextract \ \--proofoutput 1 --timeout " ^ string_of_int (to_secs 1 timeout) ^ " " ^ (if full_proofs then "--notReplLeibnizEQ --notReplAndrewsEQ --notUseExtCnfCmbd " else "") ^ File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = [(TimedOut, "CPU time limit exceeded, terminating"), (GaveUp, "No.of.Axioms")] @ known_szs_status_failures, prem_role = Hypothesis, best_slices = (* FUDGE *) - K [(1.0, (((40, ""), THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], + K [(1.0, (((40, ""), THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), best_max_new_mono_instances = default_max_new_mono_instances} val leo2 = (leo2N, fn () => leo2_config) (* Leo-III *) (* Include choice? Disabled now since it's disabled for Satallax as well. *) val leo3_config : atp_config = {exec = (["LEO3_HOME"], ["leo3"]), arguments = fn _ => fn full_proofs => fn _ => fn timeout => fn problem => fn _ => [File.bash_path problem ^ " " ^ "--atp cvc=\"$CVC4_SOLVER\" --atp e=\"$E_HOME\"/eprover \ \-p -t " ^ string_of_int (to_secs 1 timeout) ^ " " ^ (if full_proofs then "--nleq --naeq " else "")], proof_delims = tstp_proof_delims, known_failures = known_szs_status_failures, prem_role = Hypothesis, best_slices = (* FUDGE *) - K [(1.0, (((150, ""), THF (Without_FOOL, Polymorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], + K [(1.0, (((150, ""), THF (Polymorphic, {with_ite = true, with_let = true}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), best_max_new_mono_instances = default_max_new_mono_instances} val leo3 = (leo3N, fn () => leo3_config) (* Satallax *) (* Choice is disabled until there is proper reconstruction for it. 
*) val satallax_config : atp_config = {exec = (["SATALLAX_HOME"], ["satallax.opt", "satallax"]), arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => fn _ => [(case getenv "E_HOME" of "" => "" | home => "-E " ^ home ^ "/eprover ") ^ "-p tstp -t " ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem], proof_delims = [("% SZS output start Proof", "% SZS output end Proof")], known_failures = known_szs_status_failures, prem_role = Hypothesis, best_slices = (* FUDGE *) - K [(1.0, (((150, ""), THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], + K [(1.0, (((150, ""), THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), ""))], best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *), best_max_new_mono_instances = default_max_new_mono_instances} val satallax = (satallaxN, fn () => satallax_config) (* SPASS *) val spass_H1SOS = "-Heuristic=1 -SOS" val spass_H2 = "-Heuristic=2" val spass_H2LR0LT0 = "-Heuristic=2 -LR=0 -LT=0" val spass_H2NuVS0 = "-Heuristic=2 -RNuV=1 -Sorts=0" val spass_H2NuVS0Red2 = "-Heuristic=2 -RNuV=1 -Sorts=0 -RFRew=2 -RBRew=2 -RTaut=2" val spass_H2SOS = "-Heuristic=2 -SOS" val spass_config : atp_config = let val format = DFG Monomorphic in {exec = (["SPASS_HOME"], ["SPASS"]), arguments = fn _ => fn full_proofs => fn extra_options => fn timeout => fn problem => fn _ => ["-Isabelle=1 " ^ (if full_proofs then "-CNFRenaming=0 -Splits=0 " else "") ^ "-TimeLimit=" ^ string_of_int (to_secs 1 timeout) ^ " " ^ File.bash_path problem |> extra_options <> "" ? prefix (extra_options ^ " ")], proof_delims = [("Here is a proof", "Formulae used in the proof")], known_failures = [(GaveUp, "SPASS beiseite: Completion found"), (TimedOut, "SPASS beiseite: Ran out of time"), (OutOfResources, "SPASS beiseite: Maximal number of loops exceeded"), (MalformedInput, "Undefined symbol"), (MalformedInput, "Free Variable"), (Unprovable, "No formulae and clauses found in input file"), (InternalError, "Please report this error")], prem_role = Conjecture, best_slices = fn _ => (* FUDGE *) [(0.1667, (((150, meshN), format, "mono_native", combsN, true), "")), (0.1667, (((500, meshN), format, "mono_native", liftingN, true), spass_H2SOS)), (0.1666, (((50, meshN), format, "mono_native", liftingN, true), spass_H2LR0LT0)), (0.1000, (((250, meshN), format, "mono_native", combsN, true), spass_H2NuVS0)), (0.1000, (((1000, mepoN), format, "mono_native", liftingN, true), spass_H1SOS)), (0.1000, (((150, meshN), format, "poly_guards??", liftingN, false), spass_H2NuVS0Red2)), (0.1000, (((300, meshN), format, "mono_native", combsN, true), spass_H2SOS)), (0.1000, (((100, meshN), format, "mono_native", combs_and_liftingN, true), spass_H2))], best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} end val spass = (spassN, fn () => spass_config) (* Vampire *) val vampire_basic_options = "--proof tptp --output_axiom_names on" ^ (if ML_System.platform_is_windows then "" (*time slicing is not supported in the Windows version of Vampire*) else " --mode casc") val vampire_full_proof_options = " --proof_extra free --forced_options avatar=off:equality_proxy=off:general_splitting=off:inequality_splitting=0:naming=0" val vampire_config : atp_config = - let - val format = TFF (With_FOOL, Polymorphic) - in - {exec = (["VAMPIRE_HOME"], ["vampire"]), - arguments = fn _ => fn full_proofs => fn sos => fn timeout => fn problem => fn _ => -
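(* The sosN slices below cause "--sos on" to be prefixed to this command line, via the "sos = sosN ? prefix ..." combinator: *)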
[vampire_basic_options ^ (if full_proofs then " " ^ vampire_full_proof_options else "") ^ - " -t " ^ string_of_int (to_secs 1 timeout) ^ " --input_file " ^ File.bash_path problem - |> sos = sosN ? prefix "--sos on "], - proof_delims = - [("=========== Refutation ==========", - "======= End of refutation =======")] @ - tstp_proof_delims, - known_failures = - [(GaveUp, "UNPROVABLE"), - (GaveUp, "CANNOT PROVE"), - (Unprovable, "Satisfiability detected"), - (Unprovable, "Termination reason: Satisfiable"), - (Interrupted, "Aborted by signal SIGINT")] @ - known_szs_status_failures, - prem_role = Hypothesis, - best_slices = fn ctxt => - (* FUDGE *) - [(0.333, (((500, meshN), format, "mono_native_fool", combs_or_liftingN, false), sosN)), - (0.333, (((150, meshN), format, "poly_native_fool", combs_or_liftingN, false), sosN)), - (0.334, (((50, meshN), format, "mono_native_fool", combs_or_liftingN, false), no_sosN))] - |> Config.get ctxt force_sos ? (hd #> apfst (K 1.0) #> single), - best_max_mono_iters = default_max_mono_iters, - best_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)} - end + {exec = (["VAMPIRE_HOME"], ["vampire"]), + arguments = fn _ => fn full_proofs => fn sos => fn timeout => fn problem => fn _ => + [vampire_basic_options ^ (if full_proofs then " " ^ vampire_full_proof_options else "") ^ + " -t " ^ string_of_int (to_secs 1 timeout) ^ " --input_file " ^ File.bash_path problem + |> sos = sosN ? prefix "--sos on "], + proof_delims = + [("=========== Refutation ==========", + "======= End of refutation =======")] @ + tstp_proof_delims, + known_failures = + [(GaveUp, "UNPROVABLE"), + (GaveUp, "CANNOT PROVE"), + (Unprovable, "Satisfiability detected"), + (Unprovable, "Termination reason: Satisfiable"), + (Interrupted, "Aborted by signal SIGINT")] @ + known_szs_status_failures, + prem_role = Hypothesis, + best_slices = fn ctxt => + (* FUDGE *) + [(0.333, (((500, meshN), TX1, "mono_native_fool", combs_or_liftingN, false), sosN)), + (0.333, (((150, meshN), TX1, "poly_native_fool", combs_or_liftingN, false), sosN)), + (0.334, (((50, meshN), TX1, "mono_native_fool", combs_or_liftingN, false), no_sosN))] + |> Config.get ctxt force_sos ? 
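(* When "force_sos" is set, only the first slice is kept and is given the entire time budget: "hd" selects it, "apfst (K 1.0)" overrides its time fraction, and "single" rebuilds a one-element slice list: *)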
(hd #> apfst (K 1.0) #> single), + best_max_mono_iters = default_max_mono_iters, + best_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)} val vampire = (vampireN, fn () => vampire_config) (* Z3 with TPTP syntax (half experimental, half legacy) *) val z3_tptp_config : atp_config = - let - val format = TFF (Without_FOOL, Monomorphic) - in - {exec = (["Z3_TPTP_HOME"], ["z3_tptp"]), - arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => fn _ => - ["-proof -t:" ^ string_of_int (to_secs 1 timeout) ^ " -file:" ^ File.bash_path problem], - proof_delims = [("SZS status Theorem", "")], - known_failures = known_szs_status_failures, - prem_role = Hypothesis, - best_slices = - (* FUDGE *) - K [(0.5, (((250, meshN), format, "mono_native", combsN, false), "")), - (0.25, (((125, mepoN), format, "mono_native", combsN, false), "")), - (0.125, (((62, mashN), format, "mono_native", combsN, false), "")), - (0.125, (((31, meshN), format, "mono_native", combsN, false), ""))], - best_max_mono_iters = default_max_mono_iters, - best_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)} - end + {exec = (["Z3_TPTP_HOME"], ["z3_tptp"]), + arguments = fn _ => fn _ => fn _ => fn timeout => fn problem => fn _ => + ["-proof -t:" ^ string_of_int (to_secs 1 timeout) ^ " -file:" ^ File.bash_path problem], + proof_delims = [("SZS status Theorem", "")], + known_failures = known_szs_status_failures, + prem_role = Hypothesis, + best_slices = + (* FUDGE *) + K [(0.5, (((250, meshN), TF0, "mono_native", combsN, false), "")), + (0.25, (((125, mepoN), TF0, "mono_native", combsN, false), "")), + (0.125, (((62, mashN), TF0, "mono_native", combsN, false), "")), + (0.125, (((31, meshN), TF0, "mono_native", combsN, false), ""))], + best_max_mono_iters = default_max_mono_iters, + best_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)} val z3_tptp = (z3_tptpN, fn () => z3_tptp_config) (* Zipperposition *) val zipperposition_config : atp_config = let - val format = THF (Without_FOOL, Polymorphic, THF_Without_Choice) - val enc = ((512, "meshN"), format, "mono_native_higher", keep_lamsN, false) + val format = + THF (Polymorphic, {with_ite = true, with_let = false}, THF_Without_Choice) + val enc = ((512, "meshN"), format, "mono_native_higher_fool", keep_lamsN, false) in {exec = (["ZIPPERPOSITION_HOME"], ["zipperposition"]), arguments = fn _ => fn _ => fn extra_options => fn timeout => fn problem => fn _ => ["--input tptp", "--output tptp", "--timeout " ^ Time.toString timeout, extra_options, File.bash_path problem], proof_delims = tstp_proof_delims, known_failures = [(TimedOut, "SZS status ResourceOut")] @ (* odd way of timing out *) known_szs_status_failures, prem_role = Hypothesis, best_slices = fn _ => [(1, (enc, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --max-inferences=1 --ho-unif-max-depth=1 --ho-max-elims=0 --ho-max-app-projections=0 --ho-max-rigid-imitations=1 --ho-max-identifications=0 --boolean-reasoning=bool-hoist --bool-hoist-simpl=true --bool-select=LI --recognize-injectivity=true --ext-rules=ext-family --ext-rules-max-depth=1 --ho-choice-inst=true --ho-prim-enum=none --ho-elim-leibniz=0 --interpret-bool-funs=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --ho-unif-level=pragmatic-framework --select=bb+e-selection2 --post-cnf-lambda-lifting=true -q \"4|prefer-sos|pnrefined(2,1,1,1,2,2,2)\" -q \"6|prefer-processed|conjecture-relative-struct(1.5,3.5,2,3)\" -q \"1|const|fifo\" -q 
\"4|prefer-ground|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-struct(1,5,2,3)\" --avatar=off --recognize-injectivity=true --ho-neg-ext=true --e-timeout=2 --ho-pattern-decider=true --ho-fixpoint-decider=true --e-max-derived=50 --ignore-orphans=true --e-auto=true --presaturate=true --e-call-point=0.1")), (1, (enc, "--mode=ho-pragmatic --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=2 --ho-unif-max-depth=1 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection16 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --sine=50 --sine-tolerance=1.0 --sine-depth-max=3 --sine-depth-min=1 --sine-trim-implications=true --ho-unif-level=pragmatic-framework --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=lambda-def-invfreqrank --e-call-point=0.1")), (1, (enc, "-nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=off --ho-prim-enum=full --ho-prim-max=1 --avatar=off --recognize-injectivity=true --ho-elim-leibniz=4 --ho-unif-level=full-framework --no-max-vars -q \"2|prefer-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1)\" -q \"4|const|conjecture-relative-e(0.1,1,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-e(0.5,1,100,100,100,100,1.5,1.5,1.5)\" -q \"4|prefer-sos|pnrefined(1,1,1,1,2,1.5,2)\" --select=ho-selection5 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=5 --e-call-point=0.25 --e-auto=true --sine=50 --sine-tolerance=2 --sine-depth-max=4 --sine-depth-min=1 --e-max-derived=96 --e-encode-lambdas=lift --scan-clause-ac=false --kbo-weight-fun=arity0 --prec-gen-fun=invfreq_conj")), (1, (enc, "--mode=ho-pragmatic -nc --tptp-def-as-rewrite --rewrite-before-cnf=true --mode=ho-competitive --boolean-reasoning=simpl-only --ext-rules=ext-family --ext-rules-max-depth=1 --ho-prim-enum=none --avatar=off --recognize-injectivity=true --ho-elim-leibniz=1 --ho-unif-level=pragmatic-framework --no-max-vars --max-inferences=4 --ho-max-app-projections=1 --ho-max-elims=0 --ho-max-rigid-imitations=2 --ho-max-identifications=0 --ho-unif-max-depth=3 -q \"6|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"6|const|conjecture-relative-var(1.02,l,f)\" -q \"1|prefer-processed|fifo\" -q \"1|prefer-non-goals|conjecture-relative-var(1,l,f)\" -q \"4|prefer-easy-ho|conjecture-relative-var(1.01,s,f)\" --select=e-selection7 --ho-choice-inst=true --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=7 --sine=50 --sine-tolerance=1 --sine-depth-max=2 --sine-depth-min=1 --e-max-derived=64 --sine-ignore-k-most-common-syms=2 --sine-trim-implications=true --e-encode-lambdas=lift --scan-clause-ac=false --lambdasup=0 --kbo-weight-fun=lambda-def-invfreqrank --demod-in-var-args=true --bool-demod=true --lambda-demod=true --e-call-point=0.1")), (1, (enc, "--mode=ho-comb-complete --boolean-reasoning=simpl-only --ext-rules=off --kbo-weight-fun=lambda-def-sqarity --ho-prim-enum=none --tptp-def-as-rewrite -q 
\"4|prefer-sos|orient-lmax(2,1,2,1,1)\" -q \"4|defer-sos|conjecture-relative-var(1,s,f)\" -q \"3|const|default\" -q \"1|prefer-processed|fifo\" --ho-elim-leibniz=1 --select=NoSelection --solve-formulas=true --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=8 --sine=60 --sine-tolerance=2 --sine-depth-max=5 --sine-depth-min=1 --try-e=\"$E_HOME/eprover\" --tmp-dir=\"$ISABELLE_TMP_PREFIX\" --e-timeout=3 --e-auto=true --e-max-derived=50 --e-encode-lambdas=ignore --scan-clause-ac=false --presaturate=true --comb-b-penalty=3 --comb-c-penalty=3 --comb-k-penalty=1 --comb-s-penalty=5 --subvarsup=false --e-call-point=0.15")), (1, (enc, "--mode=ho-pragmatic --boolean-reasoning=simpl-only --ho-unif-max-depth=0 --ho-prim-enum=none -q \"2|prefer-ho-steps|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-sos|pnrefined(1,1,1,2,2,2,0.5)\" -q \"2|prefer-ground|default\" -q \"2|prefer-empty-trail|conjecture-relative-e(0.1,0.5,100,100,100,100,1.5,1.5,1.5)\" -q \"1|prefer-processed|fifo\" --select=bb+e-selection7 --ho-pattern-decider=false --ho-fixpoint-decider=true --ho-solid-decider=false --sine=150 --sine-tolerance=2 --sine-depth-max=3 --sine-depth-min=1 --prec-gen-fun=invfreqhack --lazy-cnf=true --lazy-cnf-kind=simp --lazy-cnf-renaming-threshold=2 --fluid-log-hoist=false --tptp-def-as-rewrite --rewrite-before-cnf=true --ho-prim-enum=eq --ho-prim-enum-add-var=true --ho-prim-max=1 --ho-prim-enum-early-bird=true -o tptp --avatar=eager --split-only-ground=true"))] |> normalize_weights, best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} end val zipperposition = (zipperpositionN, fn () => zipperposition_config) (* Remote ATP invocation via SystemOnTPTP *) val no_remote_systems = {url = "", systems = [] : string list} val remote_systems = Synchronized.var "atp_remote_systems" no_remote_systems fun get_remote_systems () = Timeout.apply (seconds 10.0) SystemOnTPTP.list_systems () handle ERROR msg => (warning msg; no_remote_systems) | Timeout.TIMEOUT _ => no_remote_systems fun find_remote_system name [] systems = find_first (String.isPrefix (name ^ "---")) systems | find_remote_system name (version :: versions) systems = case find_first (String.isPrefix (name ^ "---" ^ version)) systems of NONE => find_remote_system name versions systems | res => res fun get_remote_system name versions = Synchronized.change_result remote_systems (fn remote => (if #url remote <> SystemOnTPTP.get_url () orelse null (#systems remote) then get_remote_systems () else remote) |> ` #systems) |> `(find_remote_system name versions) fun the_remote_system name versions = (case get_remote_system name versions of (SOME sys, _) => sys | (NONE, []) => error "SystemOnTPTP is currently not available" | (NONE, syss) => (case syss |> filter_out (String.isPrefix "%") |> filter_out (curry (op =) "") of [] => error "SystemOnTPTP is currently not available" | [msg] => error ("SystemOnTPTP is currently not available: " ^ msg) | syss => error ("System " ^ quote name ^ " is not available at SystemOnTPTP.\n(Available systems: " ^ commas_quote syss ^ ".)"))) val max_remote_secs = 1000 (* give Geoff Sutcliffe's servers a break *) val isabelle_scala_function = (["SCALA_HOME"], ["bin/scala"]) fun remote_config system_name system_versions proof_delims known_failures prem_role best_slice = {exec = isabelle_scala_function, arguments = fn _ => fn _ => fn command => fn timeout => fn problem => fn _ => [the_remote_system system_name system_versions, 
Isabelle_System.absolute_path problem, command, string_of_int (Int.min (max_remote_secs, to_secs 1 timeout) * 1000)], proof_delims = union (op =) tstp_proof_delims proof_delims, known_failures = known_failures @ known_says_failures, prem_role = prem_role, best_slices = fn ctxt => [(1.0, best_slice ctxt)], best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} : atp_config fun remotify_config system_name system_versions best_slice ({proof_delims, known_failures, prem_role, ...} : atp_config) = remote_config system_name system_versions proof_delims known_failures prem_role best_slice fun remote_atp name system_name system_versions proof_delims known_failures prem_role best_slice = (remote_prefix ^ name, fn () => remote_config system_name system_versions proof_delims known_failures prem_role best_slice) fun remotify_atp (name, config) system_name system_versions best_slice = (remote_prefix ^ name, remotify_config system_name system_versions best_slice o config) fun gen_remote_waldmeister name type_enc = remote_atp name "Waldmeister" ["710"] tstp_proof_delims ([(OutOfResources, "Too many function symbols"), (Inappropriate, "**** Unexpected end of file."), (Crashed, "Unrecoverable Segmentation Fault")] @ known_szs_status_failures) Hypothesis (K (((50, ""), CNF_UEQ, type_enc, combsN, false), "") (* FUDGE *)) val remote_agsyhol = remotify_atp agsyhol "agsyHOL" ["1.0", "1"] - (K (((60, ""), THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), "") (* FUDGE *)) + (K (((60, ""), THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), "") (* FUDGE *)) val remote_alt_ergo = remotify_atp alt_ergo "Alt-Ergo" ["0.95.2"] - (K (((250, ""), TFF (Without_FOOL, Polymorphic), "poly_native", keep_lamsN, false), "") (* FUDGE *)) + (K (((250, ""), TF1, "poly_native", keep_lamsN, false), "") (* FUDGE *)) val remote_e = remotify_atp e "E" ["2.0", "1.9.1", "1.8"] - (K (((750, ""), TFF (Without_FOOL, Monomorphic), "mono_native", combsN, false), "") (* FUDGE *)) + (K (((750, ""), TF0, "mono_native", combsN, false), "") (* FUDGE *)) val remote_iprover = remotify_atp iprover "iProver" ["0.99"] (K (((150, ""), FOF, "mono_guards??", liftingN, false), "") (* FUDGE *)) val remote_leo2 = remotify_atp leo2 "LEO-II" ["1.5.0", "1.4", "1.3", "1.2", "1"] - (K (((40, ""), THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", liftingN, false), "") (* FUDGE *)) + (K (((40, ""), THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", liftingN, false), "") (* FUDGE *)) val remote_leo3 = remotify_atp leo3 "Leo-III" ["1.1"] - (K (((150, ""), THF (Without_FOOL, Polymorphic, THF_Without_Choice), "poly_native_higher", keep_lamsN, false), "") (* FUDGE *)) + (K (((150, ""), THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "poly_native_higher", keep_lamsN, false), "") (* FUDGE *)) val remote_waldmeister = gen_remote_waldmeister waldmeisterN "raw_mono_tags??" 
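(* Editorial sketch (helper name hypothetical, not part of this change): the time budget
   passed to SystemOnTPTP above is in milliseconds and is capped by "max_remote_secs":

     fun remote_budget_ms timeout =
       Int.min (max_remote_secs, to_secs 1 timeout) * 1000

   e.g. a 2000 s Sledgehammer timeout is trimmed to Int.min (1000, 2000) * 1000
   = 1000000 ms before the remote system is invoked. *)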
val remote_zipperposition = remotify_atp zipperposition "Zipperpin" ["2.1", "2.0"] - (K (((512, ""), THF (Without_FOOL, Monomorphic, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), "") (* FUDGE *)) + (K (((512, ""), THF (Monomorphic, {with_ite = false, with_let = false}, THF_Without_Choice), "mono_native_higher", keep_lamsN, false), "") (* FUDGE *)) (* Dummy prover *) fun dummy_config prem_role format type_enc uncurried_aliases : atp_config = {exec = (["ISABELLE_ATP"], ["scripts/dummy_atp"]), arguments = K (K (K (K (K (K []))))), proof_delims = [], known_failures = known_szs_status_failures, prem_role = prem_role, best_slices = K [(1.0, (((200, ""), format, type_enc, if is_format_higher_order format then keep_lamsN else combsN, uncurried_aliases), ""))], best_max_mono_iters = default_max_mono_iters, best_max_new_mono_instances = default_max_new_mono_instances} - -val dummy_fof_format = FOF -val dummy_fof_config = dummy_config Hypothesis dummy_fof_format "mono_guards??" false -val dummy_fof = (dummy_fofN, fn () => dummy_fof_config) +val dummy_fof = + let + val config = dummy_config Hypothesis FOF "mono_guards??" false + in (dummy_fofN, fn () => config) end -val dummy_tfx_format = TFF (With_FOOL, Polymorphic) -val dummy_tfx_config = dummy_config Hypothesis dummy_tfx_format "mono_native_fool" false -val dummy_tfx = (dummy_tfxN, fn () => dummy_tfx_config) +val dummy_tfx = + let + val config = dummy_config Hypothesis TX1 "poly_native_fool" false + in (dummy_tfxN, fn () => config) end -val dummy_thf_format = THF (With_FOOL, Polymorphic, THF_With_Choice) -val dummy_thf_config = dummy_config Hypothesis dummy_thf_format "mono_native_higher_fool" false -val dummy_thf = (dummy_thfN, fn () => dummy_thf_config) +val dummy_thf = + let + val config = dummy_config Hypothesis TH1 "poly_native_higher" false + in (dummy_thfN, fn () => config) end +val dummy_thf_reduced = + let + val format = THF (Polymorphic, {with_ite = false, with_let = false}, THF_Without_Choice) + val config = dummy_config Hypothesis format "poly_native_higher" false + in (dummy_thfN ^ "_reduced", fn () => config) end (* Setup *) fun add_atp (name, config) thy = Data.map (Symtab.update_new (name, (config, stamp ()))) thy handle Symtab.DUP name => error ("Duplicate ATP: " ^ quote name) fun get_atp thy name = fst (the (Symtab.lookup (Data.get thy) name)) handle Option.Option => error ("Unknown ATP: " ^ name) val supported_atps = Symtab.keys o Data.get fun is_atp_installed thy name = let val {exec, ...} = get_atp thy name () in exists (fn var => getenv var <> "") (fst exec) end fun refresh_systems_on_tptp () = Synchronized.change remote_systems (fn _ => get_remote_systems ()) fun effective_term_order ctxt atp = let val ord = Config.get ctxt term_order in if ord = smartN then {is_lpo = false, gen_weights = (atp = spassN), gen_prec = (atp = spassN), gen_simp = false} else let val is_lpo = String.isSubstring lpoN ord in {is_lpo = is_lpo, gen_weights = not is_lpo andalso String.isSubstring xweightsN ord, gen_prec = String.isSubstring xprecN ord, gen_simp = String.isSubstring xsimpN ord} end end val atps = [agsyhol, alt_ergo, e, iprover, leo2, leo3, satallax, spass, vampire, z3_tptp, zipperposition, remote_agsyhol, remote_alt_ergo, remote_e, remote_iprover, remote_leo2, remote_leo3, - remote_waldmeister, remote_zipperposition, dummy_fof, dummy_tfx, dummy_thf] + remote_waldmeister, remote_zipperposition, dummy_fof, dummy_tfx, dummy_thf, dummy_thf_reduced] val _ = Theory.setup (fold add_atp atps) end; diff --git 
a/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover.ML @@ -1,224 +1,253 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_prover.ML Author: Fabian Immler, TU Muenchen Author: Makarius Author: Jasmin Blanchette, TU Muenchen Generic prover abstraction for Sledgehammer. *) signature SLEDGEHAMMER_PROVER = sig type atp_failure = ATP_Proof.atp_failure type stature = ATP_Problem_Generate.stature type type_enc = ATP_Problem_Generate.type_enc type fact = Sledgehammer_Fact.fact type proof_method = Sledgehammer_Proof_Methods.proof_method type play_outcome = Sledgehammer_Proof_Methods.play_outcome datatype mode = Auto_Try | Try | Normal | Minimize | MaSh datatype induction_rules = Include | Exclude | Instantiate val induction_rules_of_string : string -> induction_rules option type params = {debug : bool, verbose : bool, overlord : bool, spy : bool, provers : string list, type_enc : string option, strict : bool, lam_trans : string option, uncurried_aliases : bool option, learn : bool, fact_filter : string option, induction_rules : induction_rules option, max_facts : int option, fact_thresholds : real * real, max_mono_iters : int option, max_new_mono_instances : int option, isar_proofs : bool option, compress : real option, try0 : bool, smt_proofs : bool, slice : bool, minimize : bool, timeout : Time.time, preplay_timeout : Time.time, expect : string} + val set_params_provers : params -> string list -> params + type prover_problem = {comment : string, state : Proof.state, goal : thm, subgoal : int, subgoal_count : int, factss : (string * fact list) list, found_proof : unit -> unit} type prover_result = {outcome : atp_failure option, used_facts : (string * stature) list, used_from : fact list, preferred_methss : proof_method * proof_method list list, run_time : Time.time, message : (unit -> (string * stature) list * (proof_method * play_outcome)) -> string} type prover = params -> prover_problem -> prover_result val SledgehammerN : string val str_of_mode : mode -> string val overlord_file_location_of_prover : string -> string * string val proof_banner : mode -> string -> string val is_atp : theory -> string -> bool val bunches_of_proof_methods : Proof.context -> bool -> bool -> bool -> string -> proof_method list list val is_fact_chained : (('a * stature) * 'b) -> bool val filter_used_facts : bool -> (''a * stature) list -> ((''a * stature) * 'b) list -> ((''a * stature) * 'b) list val repair_monomorph_context : int option -> int -> int option -> int -> Proof.context -> Proof.context val supported_provers : Proof.context -> unit end; structure Sledgehammer_Prover : SLEDGEHAMMER_PROVER = struct open ATP_Proof open ATP_Util open ATP_Problem open ATP_Problem_Generate open ATP_Proof_Reconstruct open Metis_Tactic open Sledgehammer_Fact open Sledgehammer_Proof_Methods open Sledgehammer_ATP_Systems (* Identifier that distinguishes Sledgehammer from other tools that could use "Async_Manager". 
*) val SledgehammerN = "Sledgehammer" datatype mode = Auto_Try | Try | Normal | Minimize | MaSh fun str_of_mode Auto_Try = "Auto Try" | str_of_mode Try = "Try" | str_of_mode Normal = "Normal" | str_of_mode Minimize = "Minimize" | str_of_mode MaSh = "MaSh" datatype induction_rules = Include | Exclude | Instantiate fun induction_rules_of_string "include" = SOME Include | induction_rules_of_string "exclude" = SOME Exclude | induction_rules_of_string "instantiate" = SOME Instantiate | induction_rules_of_string _ = NONE val is_atp = member (op =) o supported_atps type params = {debug : bool, verbose : bool, overlord : bool, spy : bool, provers : string list, type_enc : string option, strict : bool, lam_trans : string option, uncurried_aliases : bool option, learn : bool, fact_filter : string option, induction_rules : induction_rules option, max_facts : int option, fact_thresholds : real * real, max_mono_iters : int option, max_new_mono_instances : int option, isar_proofs : bool option, compress : real option, try0 : bool, smt_proofs : bool, slice : bool, minimize : bool, timeout : Time.time, preplay_timeout : Time.time, expect : string} +fun set_params_provers params provers = + {debug = #debug params, + verbose = #verbose params, + overlord = #overlord params, + spy = #spy params, + provers = provers, + type_enc = #type_enc params, + strict = #strict params, + lam_trans = #lam_trans params, + uncurried_aliases = #uncurried_aliases params, + learn = #learn params, + fact_filter = #fact_filter params, + induction_rules = #induction_rules params, + max_facts = #max_facts params, + fact_thresholds = #fact_thresholds params, + max_mono_iters = #max_mono_iters params, + max_new_mono_instances = #max_new_mono_instances params, + isar_proofs = #isar_proofs params, + compress = #compress params, + try0 = #try0 params, + smt_proofs = #smt_proofs params, + slice = #slice params, + minimize = #minimize params, + timeout = #timeout params, + preplay_timeout = #preplay_timeout params, + expect = #expect params} + type prover_problem = {comment : string, state : Proof.state, goal : thm, subgoal : int, subgoal_count : int, factss : (string * fact list) list, found_proof : unit -> unit} type prover_result = {outcome : atp_failure option, used_facts : (string * stature) list, used_from : fact list, preferred_methss : proof_method * proof_method list list, run_time : Time.time, message : (unit -> (string * stature) list * (proof_method * play_outcome)) -> string} type prover = params -> prover_problem -> prover_result fun overlord_file_location_of_prover prover = (getenv "ISABELLE_HOME_USER", "prob_" ^ prover) fun proof_banner mode name = (case mode of Auto_Try => "Auto Sledgehammer (" ^ quote name ^ ") found a proof" | Try => "Sledgehammer (" ^ quote name ^ ") found a proof" | _ => "Try this") fun bunches_of_proof_methods ctxt try0 smt_proofs needs_full_types desperate_lam_trans = let val try0_methodss = if try0 then [[Simp_Method, Auto_Method, Blast_Method, Linarith_Method, Meson_Method, Metis_Method (NONE, NONE), Fastforce_Method, Force_Method, Presburger_Method]] else [] val metis_methods = (if try0 then [] else [Metis_Method (NONE, NONE)]) @ Metis_Method (SOME full_typesN, NONE) :: Metis_Method (SOME really_full_type_enc, SOME desperate_lam_trans) :: (if needs_full_types then [Metis_Method (SOME really_full_type_enc, NONE), Metis_Method (SOME full_typesN, SOME desperate_lam_trans)] else [Metis_Method (SOME no_typesN, SOME desperate_lam_trans)]) val smt_methodss = if smt_proofs then [map (SMT_Method o 
SMT_Verit) (Verit_Proof.all_veriT_stgies (Context.Proof ctxt)), [SMT_Method SMT_Z3]] else [] in try0_methodss @ [metis_methods] @ smt_methodss end fun is_fact_chained ((_, (sc, _)), _) = sc = Chained fun filter_used_facts keep_chained used = filter ((member (eq_fst (op =)) used o fst) orf (if keep_chained then is_fact_chained else K false)) val max_fact_instances = 10 (* FUDGE *) fun repair_monomorph_context max_iters best_max_iters max_new_instances best_max_new_instances = Config.put Monomorph.max_rounds (max_iters |> the_default best_max_iters) #> Config.put Monomorph.max_new_instances (max_new_instances |> the_default best_max_new_instances) #> Config.put Monomorph.max_thm_instances max_fact_instances fun supported_provers ctxt = let val thy = Proof_Context.theory_of ctxt val (remote_provers, local_provers) = sort_strings (supported_atps thy) @ sort_strings (SMT_Config.available_solvers_of ctxt) |> List.partition (String.isPrefix remote_prefix) in writeln ("Supported provers: " ^ commas (local_provers @ remote_provers)) end end; diff --git a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML @@ -1,407 +1,416 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_prover_atp.ML Author: Fabian Immler, TU Muenchen Author: Makarius Author: Jasmin Blanchette, TU Muenchen ATPs as Sledgehammer provers. *) signature SLEDGEHAMMER_PROVER_ATP = sig type mode = Sledgehammer_Prover.mode type prover = Sledgehammer_Prover.prover val atp_dest_dir : string Config.T val atp_problem_prefix : string Config.T val atp_completish : int Config.T val atp_full_names : bool Config.T val is_ho_atp : Proof.context -> string -> bool val run_atp : mode -> string -> prover end; structure Sledgehammer_Prover_ATP : SLEDGEHAMMER_PROVER_ATP = struct open ATP_Util open ATP_Problem open ATP_Problem_Generate open ATP_Proof open ATP_Proof_Reconstruct open Sledgehammer_Util open Sledgehammer_Proof_Methods open Sledgehammer_Isar open Sledgehammer_ATP_Systems open Sledgehammer_Prover (* Empty string means create files in Isabelle's temporary files directory. *) val atp_dest_dir = Attrib.setup_config_string \<^binding>\sledgehammer_atp_dest_dir\ (K "") val atp_problem_prefix = Attrib.setup_config_string \<^binding>\sledgehammer_atp_problem_prefix\ (K "prob") val atp_completish = Attrib.setup_config_int \<^binding>\sledgehammer_atp_completish\ (K 0) (* In addition to being easier to read, readable names are often much shorter, especially if types are mangled in names. This makes a difference for some provers (e.g., E). For these reasons, short names are enabled by default.
*) val atp_full_names = Attrib.setup_config_bool \<^binding>\sledgehammer_atp_full_names\ (K false) fun is_atp_of_format is_format ctxt name = let val thy = Proof_Context.theory_of ctxt in (case try (get_atp thy) name of SOME config => exists (fn (_, ((_, format, _, _, _), _)) => is_format format) (#best_slices (config ()) ctxt) | NONE => false) end val is_ho_atp = is_atp_of_format is_format_higher_order fun choose_type_enc strictness best_type_enc format = the_default best_type_enc #> type_enc_of_string strictness #> adjust_type_enc format fun has_bound_or_var_of_type pred = exists_subterm (fn Var (_, T as Type _) => pred T | Abs (_, T as Type _, _) => pred T | _ => false) (* Unwanted equalities are those between a (bound or schematic) variable that does not properly occur in the second operand. *) val is_exhaustive_finite = let fun is_bad_equal (Var z) t = not (exists_subterm (fn Var z' => z = z' | _ => false) t) | is_bad_equal (Bound j) t = not (loose_bvar1 (t, j)) | is_bad_equal _ _ = false fun do_equals t1 t2 = is_bad_equal t1 t2 orelse is_bad_equal t2 t1 fun do_formula pos t = (case (pos, t) of (_, \<^Const_>\Trueprop for t1\) => do_formula pos t1 | (true, Const (\<^const_name>\Pure.all\, _) $ Abs (_, _, t')) => do_formula pos t' | (true, Const (\<^const_name>\All\, _) $ Abs (_, _, t')) => do_formula pos t' | (false, Const (\<^const_name>\Ex\, _) $ Abs (_, _, t')) => do_formula pos t' | (_, \<^Const_>\Pure.imp for t1 t2\) => do_formula (not pos) t1 andalso (t2 = \<^prop>\False\ orelse do_formula pos t2) | (_, \<^Const_>\implies for t1 t2\) => do_formula (not pos) t1 andalso (t2 = \<^Const>\False\ orelse do_formula pos t2) | (_, \<^Const_>\Not for t1\) => do_formula (not pos) t1 | (true, \<^Const_>\disj for t1 t2\) => forall (do_formula pos) [t1, t2] | (false, \<^Const_>\conj for t1 t2\) => forall (do_formula pos) [t1, t2] | (true, Const (\<^const_name>\HOL.eq\, _) $ t1 $ t2) => do_equals t1 t2 | (true, Const (\<^const_name>\Pure.eq\, _) $ t1 $ t2) => do_equals t1 t2 | _ => false) in do_formula true end (* Facts containing variables of finite types such as "unit" or "bool" or of the form "ALL x. x = A | x = B | x = C" are likely to lead to untypable proofs for unsound type encodings. *) fun is_dangerous_prop ctxt = transform_elim_prop #> (has_bound_or_var_of_type (is_type_surely_finite ctxt) orf is_exhaustive_finite) fun get_slices slice slices = (0 upto length slices - 1) ~~ slices |> not slice ? (List.last #> single) fun get_facts_of_filter _ [(_, facts)] = facts | get_facts_of_filter fact_filter factss = (case AList.lookup (op =) factss fact_filter of SOME facts => facts | NONE => snd (hd factss)) (* For low values of "max_facts", this fudge value ensures that most slices are invoked with a nontrivial amount of facts. *) val max_fact_factor_fudge = 5 val mono_max_privileged_facts = 10 fun suffix_of_mode Auto_Try = "_try" | suffix_of_mode Try = "_try" | suffix_of_mode Normal = "" | suffix_of_mode MaSh = "" | suffix_of_mode Minimize = "_min" (* Give the ATPs some slack before interrupting them the hard way. "z3_tptp" on Linux appears to be the only ATP that does not honor its time limit. *) val atp_timeout_slack = seconds 1.0 (* Important messages are important but not so important that users want to see them each time. 
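   Concretely, "run_atp" below prints them only in Normal mode and only when
   "Random.random_range 0 (atp_important_message_keep_quotient - 1) = 0" holds,
   i.e. with probability 1/25 for the default quotient of 25.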
*) val atp_important_message_keep_quotient = 25 fun run_atp mode name ({debug, verbose, overlord, type_enc, strict, lam_trans, uncurried_aliases, fact_filter, max_facts, max_mono_iters, max_new_mono_instances, isar_proofs, compress, try0, smt_proofs, slice, minimize, timeout, preplay_timeout, ...} : params) ({comment, state, goal, subgoal, subgoal_count, factss, found_proof, ...} : prover_problem) = let val thy = Proof.theory_of state val ctxt = Proof.context_of state val {exec, arguments, proof_delims, known_failures, prem_role, best_slices, best_max_mono_iters, best_max_new_mono_instances, ...} = get_atp thy name () val full_proofs = isar_proofs |> the_default (mode = Minimize) val local_name = perhaps (try (unprefix remote_prefix)) name val completish = Config.get ctxt atp_completish val atp_mode = if completish > 0 then Sledgehammer_Completish completish else Sledgehammer val (_, hyp_ts, concl_t) = strip_subgoal goal subgoal ctxt val (dest_dir, problem_prefix) = if overlord then overlord_file_location_of_prover name else (Config.get ctxt atp_dest_dir, Config.get ctxt atp_problem_prefix) val problem_file_name = Path.basic (problem_prefix ^ (if overlord then "" else serial_string ()) ^ suffix_of_mode mode ^ "_" ^ string_of_int subgoal) + |> Path.ext "p" val prob_path = if dest_dir = "" then File.tmp_path problem_file_name else if File.exists (Path.explode dest_dir) then Path.explode dest_dir + problem_file_name else error ("No such directory: " ^ quote dest_dir) val executable = (case find_first (fn var => getenv var <> "") (fst exec) of SOME var => let val pref = getenv var ^ "/" val paths = map (Path.explode o prefix pref) (if ML_System.platform_is_windows then map (suffix ".exe") (snd exec) @ snd exec else snd exec); in (case find_first File.exists paths of SOME path => path | NONE => error ("Bad executable: " ^ Path.print (hd paths))) end | NONE => error ("The environment variable " ^ quote (List.last (fst exec)) ^ " is not set")) fun run () = let (* If slicing is disabled, we expand the last slice to fill the entire time available. 
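   This works because "get_slices" above keeps only the last slice when "slice" is
   false; that single slice is then run with all of the remaining time, since
   "run_slice" caps the timeout by "time_frac * slices_timeout_ms" only for
   non-final slices.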
*) val all_slices = best_slices ctxt val actual_slices = get_slices slice all_slices fun max_facts_of_slices (slices : (real * (slice_spec * string)) list) = fold (Integer.max o fst o #1 o fst o snd) slices 0 val num_actual_slices = length actual_slices val max_fact_factor = Real.fromInt (case max_facts of NONE => max_facts_of_slices all_slices | SOME max => max) / Real.fromInt (max_facts_of_slices (map snd actual_slices)) fun monomorphize_facts facts = let val ctxt = ctxt |> repair_monomorph_context max_mono_iters best_max_mono_iters max_new_mono_instances best_max_new_mono_instances (* pseudo-theorem involving the same constants as the subgoal *) val subgoal_th = Logic.list_implies (hyp_ts, concl_t) |> Skip_Proof.make_thm thy val rths = facts |> chop mono_max_privileged_facts |>> map (pair 1 o snd) ||> map (pair 2 o snd) |> op @ |> cons (0, subgoal_th) in Monomorph.monomorph atp_schematic_consts_of ctxt rths |> tl |> curry ListPair.zip (map fst facts) |> maps (fn (name, rths) => map (pair name o zero_var_indexes o snd) rths) end val real_ms = Real.fromInt o Time.toMilliseconds (* TODO: replace this seems-to-work per-slice overhead with actually-measured value *) val slices_overhead_ms = Int.max (0, num_actual_slices * 25) val slices_timeout_ms = real (Time.toMilliseconds timeout - slices_overhead_ms) fun run_slice time_left (cache_key, cache_value) (slice, (time_frac, (key as ((best_max_facts, best_fact_filter), format, best_type_enc, best_lam_trans, best_uncurried_aliases), extra))) = let val effective_fact_filter = fact_filter |> the_default best_fact_filter val facts = get_facts_of_filter effective_fact_filter factss val num_facts = Real.ceil (max_fact_factor * Real.fromInt best_max_facts) + max_fact_factor_fudge |> Integer.min (length facts) val strictness = if strict then Strict else Non_Strict val type_enc = type_enc |> choose_type_enc strictness best_type_enc format val slice_timeout = (real_ms time_left |> (if slice < num_actual_slices - 1 then curry Real.min (time_frac * slices_timeout_ms) else I)) * 0.001 |> seconds val generous_slice_timeout = if mode = MaSh then one_day else slice_timeout + atp_timeout_slack val _ = if debug then quote name ^ " slice #" ^ string_of_int (slice + 1) ^ " with " ^ string_of_int num_facts ^ " fact" ^ plural_s num_facts ^ " for " ^ string_of_time slice_timeout ^ "..." |> writeln else () val value as (atp_problem, _, _, _) = if cache_key = SOME key then cache_value else let val sound = is_type_enc_sound type_enc val generate_info = (case format of DFG _ => true | _ => false) val readable_names = not (Config.get ctxt atp_full_names) val lam_trans = lam_trans |> the_default best_lam_trans val uncurried_aliases = uncurried_aliases |> the_default best_uncurried_aliases in facts |> not sound ? filter_out (is_dangerous_prop ctxt o Thm.prop_of o snd) |> take num_facts |> not (is_type_enc_polymorphic type_enc) ? 
monomorphize_facts |> map (apsnd Thm.prop_of) |> generate_atp_problem ctxt generate_info format prem_role type_enc atp_mode lam_trans uncurried_aliases readable_names true hyp_ts concl_t end fun sel_weights () = atp_problem_selection_weights atp_problem fun ord_info () = atp_problem_term_order_info atp_problem val ord = effective_term_order ctxt name val args = arguments ctxt full_proofs extra slice_timeout prob_path (ord, ord_info, sel_weights) val command = space_implode " " (File.bash_path executable :: args) fun run_command () = if exec = isabelle_scala_function then let val {output, timing} = SystemOnTPTP.run_system_encoded args in (output, timing) end else let val res = Isabelle_System.bash_process (Bash.script command |> Bash.redirect) in (Process_Result.out res, Process_Result.timing_elapsed res) end val _ = atp_problem |> lines_of_atp_problem format ord ord_info |> (exec <> isabelle_scala_function) ? cons ("% " ^ command ^ "\n" ^ (if comment = "" then "" else "% " ^ comment ^ "\n")) |> File.write_list prob_path val ((output, run_time), (atp_proof, outcome)) = Timeout.apply generous_slice_timeout run_command () |>> overlord ? (fn output => prefix ("% " ^ command ^ "\n% " ^ timestamp () ^ "\n") output) |> (fn accum as (output, _) => (accum, extract_tstplike_proof_and_outcome verbose proof_delims known_failures output |>> atp_proof_of_tstplike_proof (perhaps (try (unprefix remote_prefix)) name) atp_problem handle UNRECOGNIZED_ATP_PROOF () => ([], SOME ProofUnparsable))) handle Timeout.TIMEOUT _ => (("", slice_timeout), ([], SOME TimedOut)) | ERROR msg => (("", Time.zeroTime), ([], SOME (UnknownError msg))) val outcome = (case outcome of NONE => (case used_facts_in_unsound_atp_proof ctxt (map fst facts) atp_proof of SOME facts => let val failure = UnsoundProof (is_type_enc_sound type_enc, sort string_ord facts) in if debug then (warning (string_of_atp_failure failure); NONE) else SOME failure end | NONE => (found_proof (); NONE)) | _ => outcome) in ((SOME key, value), (output, run_time, facts, atp_proof, outcome), SOME (format, type_enc)) end val timer = Timer.startRealTimer () fun maybe_run_slice slice (result as (cache, (_, run_time0, _, _, SOME _), _)) = let val time_left = timeout - Timer.checkRealTimer timer in if time_left <= Time.zeroTime then result else run_slice time_left cache slice |> (fn (cache, (output, run_time, used_from, atp_proof, outcome), format_type_enc) => (cache, (output, run_time0 + run_time, used_from, atp_proof, outcome), format_type_enc)) end | maybe_run_slice _ result = result in ((NONE, ([], Symtab.empty, [], Symtab.empty)), ("", Time.zeroTime, [], [], SOME InternalError), NONE) |> fold maybe_run_slice actual_slices end (* If the problem file has not been exported, remove it; otherwise, export the proof file too. 
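   For example, a problem file "prob_123_1.p" gives rise to a proof file
   "prob_123_1_proof.p" (the concrete name is illustrative); "make_export_path"
   below derives it by splitting off the extension, suffixing "_proof", and
   reattaching the extension.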
*) fun clean_up () = if dest_dir = "" then (try File.rm prob_path; ()) else () fun export (_, (output, _, _, _, _), _) = - if dest_dir = "" then () - else File.write (Path.explode (Path.implode prob_path ^ "_proof")) output + let + val make_export_path = + Path.split_ext + #> apfst (Path.explode o suffix "_proof" o Path.implode) + #> swap + #> uncurry Path.ext + in + if dest_dir = "" then () + else File.write (make_export_path prob_path) output + end val ((_, (_, pool, lifted, sym_tab)), (output, run_time, used_from, atp_proof, outcome), SOME (format, type_enc)) = with_cleanup clean_up run () |> tap export val important_message = if mode = Normal andalso Random.random_range 0 (atp_important_message_keep_quotient - 1) = 0 then extract_important_message output else "" val (used_facts, preferred_methss, message) = (case outcome of NONE => let val used_facts = sort_by fst (used_facts_in_atp_proof ctxt (map fst used_from) atp_proof) val needs_full_types = is_typed_helper_used_in_atp_proof atp_proof val preferred_methss = (Metis_Method (NONE, NONE), bunches_of_proof_methods ctxt try0 smt_proofs needs_full_types (if atp_proof_prefers_lifting atp_proof then liftingN else opaque_liftingN)) in (used_facts, preferred_methss, fn preplay => let val _ = if verbose then writeln "Generating proof text..." else () fun isar_params () = let val metis_type_enc = if is_typed_helper_used_in_atp_proof atp_proof then SOME full_typesN else NONE val metis_lam_trans = if atp_proof_prefers_lifting atp_proof then SOME liftingN else NONE val atp_proof = atp_proof |> termify_atp_proof ctxt name format type_enc pool lifted sym_tab |> local_name = spassN ? introduce_spass_skolems |> factify_atp_proof (map fst used_from) hyp_ts concl_t in (verbose, (metis_type_enc, metis_lam_trans), preplay_timeout, compress, try0, minimize, atp_proof, goal) end val one_line_params = (preplay (), proof_banner mode name, subgoal, subgoal_count) val num_chained = length (#facts (Proof.goal state)) in proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained one_line_params ^ (if important_message <> "" then "\n\nImportant message from Dr. Geoff Sutcliffe:\n" ^ important_message else "") end) end | SOME failure => ([], (Auto_Method (* dummy *), []), fn _ => string_of_atp_failure failure)) in {outcome = outcome, used_facts = used_facts, used_from = used_from, preferred_methss = preferred_methss, run_time = run_time, message = message} end end; diff --git a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML @@ -1,231 +1,237 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_prover_smt.ML Author: Fabian Immler, TU Muenchen Author: Makarius Author: Jasmin Blanchette, TU Muenchen SMT solvers as Sledgehammer provers. 
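The solver is run in up to "smt_max_slices" slices ("smt_filter_loop" below): each
non-final slice receives min (remaining time, max (smt_slice_min_secs, smt_slice_time_frac
* remaining time)) -- e.g. roughly 10 s of a 30 s budget under the default fractions --
and when a slice fails in a way that suggests fact overload, the next slice keeps only
"smt_slice_fact_frac" of the facts and rotates to the next fact filter.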
*) signature SLEDGEHAMMER_PROVER_SMT = sig type stature = ATP_Problem_Generate.stature type mode = Sledgehammer_Prover.mode type prover = Sledgehammer_Prover.prover val smt_builtins : bool Config.T val smt_triggers : bool Config.T val smt_max_slices : int Config.T val smt_slice_fact_frac : real Config.T val smt_slice_time_frac : real Config.T val smt_slice_min_secs : int Config.T val is_smt_prover : Proof.context -> string -> bool val run_smt_solver : mode -> string -> prover end; structure Sledgehammer_Prover_SMT : SLEDGEHAMMER_PROVER_SMT = struct open ATP_Util open ATP_Proof open ATP_Problem_Generate open ATP_Proof_Reconstruct open Sledgehammer_Util open Sledgehammer_Proof_Methods open Sledgehammer_Isar open Sledgehammer_ATP_Systems open Sledgehammer_Prover val smt_builtins = Attrib.setup_config_bool \<^binding>\sledgehammer_smt_builtins\ (K true) val smt_triggers = Attrib.setup_config_bool \<^binding>\sledgehammer_smt_triggers\ (K true) val is_smt_prover = member (op =) o SMT_Config.available_solvers_of (* "SMT_Failure.Abnormal_Termination" carries the solver's return code. Until these are sorted out properly in the SMT module, we must interpret these here. *) val z3_failures = [(101, OutOfResources), (103, MalformedInput), (110, MalformedInput), (112, TimedOut)] val unix_failures = [(134, Crashed), (138, Crashed), (139, Crashed)] val smt_failures = z3_failures @ unix_failures fun failure_of_smt_failure (SMT_Failure.Counterexample genuine) = if genuine then Unprovable else GaveUp | failure_of_smt_failure SMT_Failure.Time_Out = TimedOut | failure_of_smt_failure (SMT_Failure.Abnormal_Termination code) = (case AList.lookup (op =) smt_failures code of SOME failure => failure | NONE => UnknownError ("Abnormal termination with exit code " ^ string_of_int code)) | failure_of_smt_failure SMT_Failure.Out_Of_Memory = OutOfResources | failure_of_smt_failure (SMT_Failure.Other_Failure s) = UnknownError s (* FUDGE *) val smt_max_slices = Attrib.setup_config_int \<^binding>\sledgehammer_smt_max_slices\ (K 8) val smt_slice_fact_frac = Attrib.setup_config_real \<^binding>\sledgehammer_smt_slice_fact_frac\ (K 0.667) val smt_slice_time_frac = Attrib.setup_config_real \<^binding>\sledgehammer_smt_slice_time_frac\ (K 0.333) val smt_slice_min_secs = Attrib.setup_config_int \<^binding>\sledgehammer_smt_slice_min_secs\ (K 3) val is_boring_builtin_typ = not o exists_subtype (member (op =) [\<^typ>\nat\, \<^typ>\int\, HOLogic.realT]) fun smt_filter_loop name ({debug, overlord, max_mono_iters, max_new_mono_instances, timeout, slice, - ...} : params) state goal i = + type_enc, ...} : params) state goal i = let + val (higher_order, nat_as_int) = + (case type_enc of + SOME s => (String.isSubstring "native_higher" s, String.isSubstring "arith" s) + | NONE => (false, false)) fun repair_context ctxt = ctxt |> Context.proof_map (SMT_Config.select_solver name) |> Config.put SMT_Config.verbose debug + |> Config.put SMT_Config.higher_order higher_order + |> Config.put SMT_Config.nat_as_int nat_as_int |> (if overlord then Config.put SMT_Config.debug_files (overlord_file_location_of_prover name |> (fn (path, name) => path ^ "/" ^ name)) else I) |> Config.put SMT_Config.infer_triggers (Config.get ctxt smt_triggers) |> not (Config.get ctxt smt_builtins) ? 
(SMT_Builtin.filter_builtins is_boring_builtin_typ #> Config.put SMT_Systems.z3_extensions false) |> repair_monomorph_context max_mono_iters default_max_mono_iters max_new_mono_instances default_max_new_mono_instances val state = Proof.map_context (repair_context) state val ctxt = Proof.context_of state val max_slices = if slice then Config.get ctxt smt_max_slices else 1 fun do_slice timeout slice outcome0 time_so_far (factss as (fact_filter, facts) :: _) = let val timer = Timer.startRealTimer () val slice_timeout = if slice < max_slices then let val ms = Time.toMilliseconds timeout in Int.min (ms, Int.max (1000 * Config.get ctxt smt_slice_min_secs, Real.ceil (Config.get ctxt smt_slice_time_frac * Real.fromInt ms))) |> Time.fromMilliseconds end else timeout val num_facts = length facts val _ = if debug then quote name ^ " slice " ^ string_of_int slice ^ " with " ^ string_of_int num_facts ^ " fact" ^ plural_s num_facts ^ " for " ^ string_of_time slice_timeout |> writeln else () val birth = Timer.checkRealTimer timer val filter_result as {outcome, ...} = SMT_Solver.smt_filter ctxt goal facts i slice_timeout handle exn => if Exn.is_interrupt exn orelse debug then Exn.reraise exn else {outcome = SOME (SMT_Failure.Other_Failure (Runtime.exn_message exn)), fact_ids = NONE, atp_proof = K []} val death = Timer.checkRealTimer timer val outcome0 = if is_none outcome0 then SOME outcome else outcome0 val time_so_far = time_so_far + (death - birth) val timeout = timeout - Timer.checkRealTimer timer val too_many_facts_perhaps = (case outcome of NONE => false | SOME (SMT_Failure.Counterexample _) => false | SOME SMT_Failure.Time_Out => slice_timeout <> timeout | SOME (SMT_Failure.Abnormal_Termination _) => true (* kind of *) | SOME SMT_Failure.Out_Of_Memory => true | SOME (SMT_Failure.Other_Failure _) => true) in if too_many_facts_perhaps andalso slice < max_slices andalso num_facts > 0 andalso timeout > Time.zeroTime then let val new_num_facts = Real.ceil (Config.get ctxt smt_slice_fact_frac * Real.fromInt num_facts) val factss as (new_fact_filter, _) :: _ = factss |> (fn (x :: xs) => xs @ [x]) |> app_hd (apsnd (take new_num_facts)) val show_filter = fact_filter <> new_fact_filter fun num_of_facts fact_filter num_facts = string_of_int num_facts ^ (if show_filter then " " ^ quote fact_filter else "") ^ " fact" ^ plural_s num_facts val _ = if debug then quote name ^ " invoked with " ^ num_of_facts fact_filter num_facts ^ ": " ^ string_of_atp_failure (failure_of_smt_failure (the outcome)) ^ " Retrying with " ^ num_of_facts new_fact_filter new_num_facts ^ "..." 
|> writeln else () in do_slice timeout (slice + 1) outcome0 time_so_far factss end else {outcome = if is_none outcome then NONE else the outcome0, filter_result = filter_result, used_from = facts, run_time = time_so_far} end in do_slice timeout 1 NONE Time.zeroTime end fun run_smt_solver mode name (params as {debug, verbose, isar_proofs, compress, try0, smt_proofs, minimize, preplay_timeout, ...}) ({state, goal, subgoal, subgoal_count, factss, found_proof, ...} : prover_problem) = let val thy = Proof.theory_of state val ctxt = Proof.context_of state val factss = map (apsnd (map (apsnd (Thm.transfer thy)))) factss val {outcome, filter_result = {fact_ids, atp_proof, ...}, used_from, run_time} = smt_filter_loop name params state goal subgoal factss val used_facts = (case fact_ids of NONE => map fst used_from | SOME ids => sort_by fst (map (fst o snd) ids)) val outcome = Option.map failure_of_smt_failure outcome val (preferred_methss, message) = (case outcome of NONE => let val _ = found_proof (); val preferred = if smt_proofs then SMT_Method (if name = "verit" then SMT_Verit "default" else SMT_Z3) else Metis_Method (NONE, NONE); val methss = bunches_of_proof_methods ctxt try0 smt_proofs false liftingN; in ((preferred, methss), fn preplay => let val _ = if verbose then writeln "Generating proof text..." else () fun isar_params () = (verbose, (NONE, NONE), preplay_timeout, compress, try0, minimize, atp_proof (), goal) val one_line_params = (preplay (), proof_banner mode name, subgoal, subgoal_count) val num_chained = length (#facts (Proof.goal state)) in proof_text ctxt debug isar_proofs smt_proofs isar_params num_chained one_line_params end) end | SOME failure => ((Auto_Method (* dummy *), []), fn _ => string_of_atp_failure failure)) in {outcome = outcome, used_facts = used_facts, used_from = used_from, preferred_methss = preferred_methss, run_time = run_time, message = message} end end; diff --git a/src/HOL/Wellfounded.thy b/src/HOL/Wellfounded.thy --- a/src/HOL/Wellfounded.thy +++ b/src/HOL/Wellfounded.thy @@ -1,969 +1,972 @@ (* Title: HOL/Wellfounded.thy Author: Tobias Nipkow Author: Lawrence C Paulson Author: Konrad Slind Author: Alexander Krauss Author: Andrei Popescu, TU Muenchen *) section \Well-founded Recursion\ theory Wellfounded imports Transitive_Closure begin subsection \Basic Definitions\ definition wf :: "('a \ 'a) set \ bool" where "wf r \ (\P. (\x. (\y. (y, x) \ r \ P y) \ P x) \ (\x. P x))" definition wfP :: "('a \ 'a \ bool) \ bool" where "wfP r \ wf {(x, y). r x y}" lemma wfP_wf_eq [pred_set_conv]: "wfP (\x y. (x, y) \ r) = wf r" by (simp add: wfP_def) lemma wfUNIVI: "(\P x. (\x. (\y. (y, x) \ r \ P y) \ P x) \ P x) \ wf r" unfolding wf_def by blast lemmas wfPUNIVI = wfUNIVI [to_pred] text \Restriction to domain \A\ and range \B\. If \r\ is well-founded over their intersection, then \wf r\.\ lemma wfI: assumes "r \ A \ B" and "\x P. \\x. (\y. (y, x) \ r \ P y) \ P x; x \ A; x \ B\ \ P x" shows "wf r" using assms unfolding wf_def by blast lemma wf_induct: assumes "wf r" and "\x. \y. 
(y, x) \ r \ P y \ P x" shows "P a" using assms unfolding wf_def by blast lemmas wfP_induct = wf_induct [to_pred] lemmas wf_induct_rule = wf_induct [rule_format, consumes 1, case_names less, induct set: wf] lemmas wfP_induct_rule = wf_induct_rule [to_pred, induct set: wfP] lemma wf_not_sym: "wf r \ (a, x) \ r \ (x, a) \ r" by (induct a arbitrary: x set: wf) blast lemma wf_asym: assumes "wf r" "(a, x) \ r" obtains "(x, a) \ r" by (drule wf_not_sym[OF assms]) lemma wf_not_refl [simp]: "wf r \ (a, a) \ r" by (blast elim: wf_asym) lemma wf_irrefl: assumes "wf r" obtains "(a, a) \ r" by (drule wf_not_refl[OF assms]) lemma wf_imp_irrefl: assumes "wf r" shows "irrefl r" using wf_irrefl [OF assms] by (auto simp add: irrefl_def) lemma wf_wellorderI: assumes wf: "wf {(x::'a::ord, y). x < y}" and lin: "OFCLASS('a::ord, linorder_class)" shows "OFCLASS('a::ord, wellorder_class)" apply (rule wellorder_class.intro [OF lin]) apply (simp add: wellorder_class.intro class.wellorder_axioms.intro wf_induct_rule [OF wf]) done lemma (in wellorder) wf: "wf {(x, y). x < y}" unfolding wf_def by (blast intro: less_induct) +lemma (in wellorder) wfP_less[simp]: "wfP (<)" + by (simp add: wf wfP_def) + subsection \Basic Results\ text \Point-free characterization of well-foundedness\ lemma wfE_pf: assumes wf: "wf R" and a: "A \ R `` A" shows "A = {}" proof - from wf have "x \ A" for x proof induct fix x assume "\y. (y, x) \ R \ y \ A" then have "x \ R `` A" by blast with a show "x \ A" by blast qed then show ?thesis by auto qed lemma wfI_pf: assumes a: "\A. A \ R `` A \ A = {}" shows "wf R" proof (rule wfUNIVI) fix P :: "'a \ bool" and x let ?A = "{x. \ P x}" assume "\x. (\y. (y, x) \ R \ P y) \ P x" then have "?A \ R `` ?A" by blast with a show "P x" by blast qed subsubsection \Minimal-element characterization of well-foundedness\ lemma wfE_min: assumes wf: "wf R" and Q: "x \ Q" obtains z where "z \ Q" "\y. (y, z) \ R \ y \ Q" using Q wfE_pf[OF wf, of Q] by blast lemma wfE_min': "wf R \ Q \ {} \ (\z. z \ Q \ (\y. (y, z) \ R \ y \ Q) \ thesis) \ thesis" using wfE_min[of R _ Q] by blast lemma wfI_min: assumes a: "\x Q. x \ Q \ \z\Q. \y. (y, z) \ R \ y \ Q" shows "wf R" proof (rule wfI_pf) fix A assume b: "A \ R `` A" have False if "x \ A" for x using a[OF that] b by blast then show "A = {}" by blast qed lemma wf_eq_minimal: "wf r \ (\Q x. x \ Q \ (\z\Q. \y. (y, z) \ r \ y \ Q))" apply (rule iffI) apply (blast intro: elim!: wfE_min) by (rule wfI_min) auto lemmas wfP_eq_minimal = wf_eq_minimal [to_pred] subsubsection \Well-foundedness of transitive closure\ lemma wf_trancl: assumes "wf r" shows "wf (r\<^sup>+)" proof - have "P x" if induct_step: "\x. (\y. (y, x) \ r\<^sup>+ \ P y) \ P x" for P x proof (rule induct_step) show "P y" if "(y, x) \ r\<^sup>+" for y using \wf r\ and that proof (induct x arbitrary: y) case (less x) note hyp = \\x' y'. 
(x', x) \ r \ (y', x') \ r\<^sup>+ \ P y'\ from \(y, x) \ r\<^sup>+\ show "P y" proof cases case base show "P y" proof (rule induct_step) fix y' assume "(y', y) \ r\<^sup>+" with \(y, x) \ r\ show "P y'" by (rule hyp [of y y']) qed next case step then obtain x' where "(x', x) \ r" and "(y, x') \ r\<^sup>+" by simp then show "P y" by (rule hyp [of x' y]) qed qed qed then show ?thesis unfolding wf_def by blast qed lemmas wfP_trancl = wf_trancl [to_pred] lemma wf_converse_trancl: "wf (r\) \ wf ((r\<^sup>+)\)" apply (subst trancl_converse [symmetric]) apply (erule wf_trancl) done text \Well-foundedness of subsets\ lemma wf_subset: "wf r \ p \ r \ wf p" by (simp add: wf_eq_minimal) fast lemmas wfP_subset = wf_subset [to_pred] text \Well-foundedness of the empty relation\ lemma wf_empty [iff]: "wf {}" by (simp add: wf_def) lemma wfP_empty [iff]: "wfP (\x y. False)" proof - have "wfP bot" by (fact wf_empty[to_pred bot_empty_eq2]) then show ?thesis by (simp add: bot_fun_def) qed lemma wf_Int1: "wf r \ wf (r \ r')" by (erule wf_subset) (rule Int_lower1) lemma wf_Int2: "wf r \ wf (r' \ r)" by (erule wf_subset) (rule Int_lower2) text \Exponentiation.\ lemma wf_exp: assumes "wf (R ^^ n)" shows "wf R" proof (rule wfI_pf) fix A assume "A \ R `` A" then have "A \ (R ^^ n) `` A" by (induct n) force+ with \wf (R ^^ n)\ show "A = {}" by (rule wfE_pf) qed text \Well-foundedness of \insert\.\ lemma wf_insert [iff]: "wf (insert (y,x) r) \ wf r \ (x,y) \ r\<^sup>*" (is "?lhs = ?rhs") proof assume ?lhs then show ?rhs by (blast elim: wf_trancl [THEN wf_irrefl] intro: rtrancl_into_trancl1 wf_subset rtrancl_mono [THEN subsetD]) next assume R: ?rhs then have R': "Q \ {} \ (\z\Q. \y. (y, z) \ r \ y \ Q)" for Q by (auto simp: wf_eq_minimal) show ?lhs unfolding wf_eq_minimal proof clarify fix Q :: "'a set" and q assume "q \ Q" then obtain a where "a \ Q" and a: "\y. (y, a) \ r \ y \ Q" using R by (auto simp: wf_eq_minimal) show "\z\Q. \y'. (y', z) \ insert (y, x) r \ y' \ Q" proof (cases "a=x") case True show ?thesis proof (cases "y \ Q") case True then obtain z where "z \ Q" "(z, y) \ r\<^sup>*" "\z'. (z', z) \ r \ z' \ Q \ (z', y) \ r\<^sup>*" using R' [of "{z \ Q. (z,y) \ r\<^sup>*}"] by auto with R show ?thesis by (rule_tac x="z" in bexI) (blast intro: rtrancl_trans) next case False then show ?thesis using a \a \ Q\ by blast qed next case False with a \a \ Q\ show ?thesis by blast qed qed qed subsubsection \Well-foundedness of image\ lemma wf_map_prod_image_Dom_Ran: fixes r:: "('a \ 'a) set" and f:: "'a \ 'b" assumes wf_r: "wf r" and inj: "\ a a'. a \ Domain r \ a' \ Range r \ f a = f a' \ a = a'" shows "wf (map_prod f f ` r)" proof (unfold wf_eq_minimal, clarify) fix B :: "'b set" and b::"'b" assume "b \ B" define A where "A = f -` B \ Domain r" show "\z\B. \y. (y, z) \ map_prod f f ` r \ y \ B" proof (cases "A = {}") case False then obtain a0 where "a0 \ A" and "\a. (a, a0) \ r \ a \ A" using wfE_min[OF wf_r] by auto thus ?thesis using inj unfolding A_def by (intro bexI[of _ "f a0"]) auto qed (insert \b \ B\, unfold A_def, auto) qed lemma wf_map_prod_image: "wf r \ inj f \ wf (map_prod f f ` r)" by(rule wf_map_prod_image_Dom_Ran) (auto dest: inj_onD) subsection \Well-Foundedness Results for Unions\ lemma wf_union_compatible: assumes "wf R" "wf S" assumes "R O S \ R" shows "wf (R \ S)" proof (rule wfI_min) fix x :: 'a and Q let ?Q' = "{x \ Q. \y. (y, x) \ R \ y \ Q}" assume "x \ Q" obtain a where "a \ ?Q'" by (rule wfE_min [OF \wf R\ \x \ Q\]) blast with \wf S\ obtain z where "z \ ?Q'" and zmin: "\y. 
(y, z) \ S \ y \ ?Q'" by (erule wfE_min) have "y \ Q" if "(y, z) \ S" for y proof from that have "y \ ?Q'" by (rule zmin) assume "y \ Q" with \y \ ?Q'\ obtain w where "(w, y) \ R" and "w \ Q" by auto from \(w, y) \ R\ \(y, z) \ S\ have "(w, z) \ R O S" by (rule relcompI) with \R O S \ R\ have "(w, z) \ R" .. with \z \ ?Q'\ have "w \ Q" by blast with \w \ Q\ show False by contradiction qed with \z \ ?Q'\ show "\z\Q. \y. (y, z) \ R \ S \ y \ Q" by blast qed text \Well-foundedness of indexed union with disjoint domains and ranges.\ lemma wf_UN: assumes r: "\i. i \ I \ wf (r i)" and disj: "\i j. \i \ I; j \ I; r i \ r j\ \ Domain (r i) \ Range (r j) = {}" shows "wf (\i\I. r i)" unfolding wf_eq_minimal proof clarify fix A and a :: "'b" assume "a \ A" show "\z\A. \y. (y, z) \ \(r ` I) \ y \ A" proof (cases "\i\I. \a\A. \b\A. (b, a) \ r i") case True then obtain i b c where ibc: "i \ I" "b \ A" "c \ A" "(c,b) \ r i" by blast have ri: "\Q. Q \ {} \ \z\Q. \y. (y, z) \ r i \ y \ Q" using r [OF \i \ I\] unfolding wf_eq_minimal by auto show ?thesis using ri [of "{a. a \ A \ (\b\A. (b, a) \ r i) }"] ibc disj by blast next case False with \a \ A\ show ?thesis by blast qed qed lemma wfP_SUP: "\i. wfP (r i) \ \i j. r i \ r j \ inf (Domainp (r i)) (Rangep (r j)) = bot \ wfP (\(range r))" by (rule wf_UN[to_pred]) simp_all lemma wf_Union: assumes "\r\R. wf r" and "\r\R. \s\R. r \ s \ Domain r \ Range s = {}" shows "wf (\R)" using assms wf_UN[of R "\i. i"] by simp text \ Intuition: We find an \R \ S\-min element of a nonempty subset \A\ by case distinction. \<^enum> There is a step \a \R\ b\ with \a, b \ A\. Pick an \R\-min element \z\ of the (nonempty) set \{a\A | \b\A. a \R\ b}\. By definition, there is \z' \ A\ s.t. \z \R\ z'\. Because \z\ is \R\-min in the subset, \z'\ must be \R\-min in \A\. Because \z'\ has an \R\-predecessor, it cannot have an \S\-successor and is thus \S\-min in \A\ as well. \<^enum> There is no such step. Pick an \S\-min element of \A\. In this case it must be an \R\-min element of \A\ as well. \ lemma wf_Un: "wf r \ wf s \ Domain r \ Range s = {} \ wf (r \ s)" using wf_union_compatible[of s r] by (auto simp: Un_ac) lemma wf_union_merge: "wf (R \ S) = wf (R O R \ S O R \ S)" (is "wf ?A = wf ?B") proof assume "wf ?A" with wf_trancl have wfT: "wf (?A\<^sup>+)" . moreover have "?B \ ?A\<^sup>+" by (subst trancl_unfold, subst trancl_unfold) blast ultimately show "wf ?B" by (rule wf_subset) next assume "wf ?B" show "wf ?A" proof (rule wfI_min) fix Q :: "'a set" and x assume "x \ Q" with \wf ?B\ obtain z where "z \ Q" and "\y. (y, z) \ ?B \ y \ Q" by (erule wfE_min) then have 1: "\y. (y, z) \ R O R \ y \ Q" and 2: "\y. (y, z) \ S O R \ y \ Q" and 3: "\y. (y, z) \ S \ y \ Q" by auto show "\z\Q. \y. (y, z) \ ?A \ y \ Q" proof (cases "\y. (y, z) \ R \ y \ Q") case True with \z \ Q\ 3 show ?thesis by blast next case False then obtain z' where "z'\Q" "(z', z) \ R" by blast have "\y. (y, z') \ ?A \ y \ Q" proof (intro allI impI) fix y assume "(y, z') \ ?A" then show "y \ Q" proof assume "(y, z') \ R" then have "(y, z) \ R O R" using \(z', z) \ R\ .. with 1 show "y \ Q" . next assume "(y, z') \ S" then have "(y, z) \ S O R" using \(z', z) \ R\ .. with 2 show "y \ Q" . qed qed with \z' \ Q\ show ?thesis .. qed qed qed lemma wf_comp_self: "wf R \ wf (R O R)" \ \special case\ by (rule wf_union_merge [where S = "{}", simplified]) subsection \Well-Foundedness of Composition\ text \Bachmair and Dershowitz 1986, Lemma 2. 
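It states that if \R\ quasi-commutes over \S\, i.e. \R O S \ (R \ S)\<^sup>* O R\, then \S\<^sup>* O R O S\<^sup>*\ is well-founded if and only if \R\ is.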
[Provided by Tjark Weber]\ lemma qc_wf_relto_iff: assumes "R O S \ (R \ S)\<^sup>* O R" \ \R quasi-commutes over S\ shows "wf (S\<^sup>* O R O S\<^sup>*) \ wf R" (is "wf ?S \ _") proof show "wf R" if "wf ?S" proof - have "R \ ?S" by auto with wf_subset [of ?S] that show "wf R" by auto qed next show "wf ?S" if "wf R" proof (rule wfI_pf) fix A assume A: "A \ ?S `` A" let ?X = "(R \ S)\<^sup>* `` A" have *: "R O (R \ S)\<^sup>* \ (R \ S)\<^sup>* O R" proof - have "(x, z) \ (R \ S)\<^sup>* O R" if "(y, z) \ (R \ S)\<^sup>*" and "(x, y) \ R" for x y z using that proof (induct y z) case rtrancl_refl then show ?case by auto next case (rtrancl_into_rtrancl a b c) then have "(x, c) \ ((R \ S)\<^sup>* O (R \ S)\<^sup>*) O R" using assms by blast then show ?case by simp qed then show ?thesis by auto qed then have "R O S\<^sup>* \ (R \ S)\<^sup>* O R" using rtrancl_Un_subset by blast then have "?S \ (R \ S)\<^sup>* O (R \ S)\<^sup>* O R" by (simp add: relcomp_mono rtrancl_mono) also have "\ = (R \ S)\<^sup>* O R" by (simp add: O_assoc[symmetric]) finally have "?S O (R \ S)\<^sup>* \ (R \ S)\<^sup>* O R O (R \ S)\<^sup>*" by (simp add: O_assoc[symmetric] relcomp_mono) also have "\ \ (R \ S)\<^sup>* O (R \ S)\<^sup>* O R" using * by (simp add: relcomp_mono) finally have "?S O (R \ S)\<^sup>* \ (R \ S)\<^sup>* O R" by (simp add: O_assoc[symmetric]) then have "(?S O (R \ S)\<^sup>*) `` A \ ((R \ S)\<^sup>* O R) `` A" by (simp add: Image_mono) moreover have "?X \ (?S O (R \ S)\<^sup>*) `` A" using A by (auto simp: relcomp_Image) ultimately have "?X \ R `` ?X" by (auto simp: relcomp_Image) then have "?X = {}" using \wf R\ by (simp add: wfE_pf) moreover have "A \ ?X" by auto ultimately show "A = {}" by simp qed qed corollary wf_relcomp_compatible: assumes "wf R" and "R O S \ S O R" shows "wf (S O R)" proof - have "R O S \ (R \ S)\<^sup>* O R" using assms by blast then have "wf (S\<^sup>* O R O S\<^sup>*)" by (simp add: assms qc_wf_relto_iff) then show ?thesis by (rule Wellfounded.wf_subset) blast qed subsection \Acyclic relations\ lemma wf_acyclic: "wf r \ acyclic r" by (simp add: acyclic_def) (blast elim: wf_trancl [THEN wf_irrefl]) lemmas wfP_acyclicP = wf_acyclic [to_pred] subsubsection \Wellfoundedness of finite acyclic relations\ lemma finite_acyclic_wf: assumes "finite r" "acyclic r" shows "wf r" using assms proof (induction r rule: finite_induct) case (insert x r) then show ?case by (cases x) simp qed simp lemma finite_acyclic_wf_converse: "finite r \ acyclic r \ wf (r\)" apply (erule finite_converse [THEN iffD2, THEN finite_acyclic_wf]) apply (erule acyclic_converse [THEN iffD2]) done text \ Observe that the converse of an irreflexive, transitive, and finite relation is again well-founded. Thus, we may employ it for well-founded induction. \ lemma wf_converse: assumes "irrefl r" and "trans r" and "finite r" shows "wf (r\)" proof - have "acyclic r" using \irrefl r\ and \trans r\ by (simp add: irrefl_def acyclic_irrefl) with \finite r\ show ?thesis by (rule finite_acyclic_wf_converse) qed lemma wf_iff_acyclic_if_finite: "finite r \ wf r = acyclic r" by (blast intro: finite_acyclic_wf wf_acyclic) subsection \\<^typ>\nat\ is well-founded\ lemma less_nat_rel: "(<) = (\m n. n = Suc m)\<^sup>+\<^sup>+" proof (rule ext, rule ext, rule iffI) fix n m :: nat show "(\m n. n = Suc m)\<^sup>+\<^sup>+ m n" if "m < n" using that proof (induct n) case 0 then show ?case by auto next case (Suc n) then show ?case by (auto simp add: less_Suc_eq_le le_less intro: tranclp.trancl_into_trancl) qed show "m < n" if "(\m n. 
n = Suc m)\<^sup>+\<^sup>+ m n" using that by (induct n) (simp_all add: less_Suc_eq_le reflexive le_less) qed definition pred_nat :: "(nat \ nat) set" where "pred_nat = {(m, n). n = Suc m}" definition less_than :: "(nat \ nat) set" where "less_than = pred_nat\<^sup>+" lemma less_eq: "(m, n) \ pred_nat\<^sup>+ \ m < n" unfolding less_nat_rel pred_nat_def trancl_def by simp lemma pred_nat_trancl_eq_le: "(m, n) \ pred_nat\<^sup>* \ m \ n" unfolding less_eq rtrancl_eq_or_trancl by auto lemma wf_pred_nat: "wf pred_nat" apply (unfold wf_def pred_nat_def) apply clarify apply (induct_tac x) apply blast+ done lemma wf_less_than [iff]: "wf less_than" by (simp add: less_than_def wf_pred_nat [THEN wf_trancl]) lemma trans_less_than [iff]: "trans less_than" by (simp add: less_than_def) lemma less_than_iff [iff]: "((x,y) \ less_than) = (xAccessible Part\ text \ Inductive definition of the accessible part \acc r\ of a relation; see also @{cite "paulin-tlca"}. \ inductive_set acc :: "('a \ 'a) set \ 'a set" for r :: "('a \ 'a) set" where accI: "(\y. (y, x) \ r \ y \ acc r) \ x \ acc r" abbreviation termip :: "('a \ 'a \ bool) \ 'a \ bool" where "termip r \ accp (r\\)" abbreviation termi :: "('a \ 'a) set \ 'a set" where "termi r \ acc (r\)" lemmas accpI = accp.accI lemma accp_eq_acc [code]: "accp r = (\x. x \ Wellfounded.acc {(x, y). r x y})" by (simp add: acc_def) text \Induction rules\ theorem accp_induct: assumes major: "accp r a" assumes hyp: "\x. accp r x \ \y. r y x \ P y \ P x" shows "P a" apply (rule major [THEN accp.induct]) apply (rule hyp) apply (rule accp.accI) apply auto done lemmas accp_induct_rule = accp_induct [rule_format, induct set: accp] theorem accp_downward: "accp r b \ r a b \ accp r a" by (cases rule: accp.cases) lemma not_accp_down: assumes na: "\ accp R x" obtains z where "R z x" and "\ accp R z" proof - assume a: "\z. R z x \ \ accp R z \ thesis" show thesis proof (cases "\z. R z x \ accp R z") case True then have "\z. R z x \ accp R z" by auto then have "accp R x" by (rule accp.accI) with na show thesis .. next case False then obtain z where "R z x" and "\ accp R z" by auto with a show thesis . qed qed lemma accp_downwards_aux: "r\<^sup>*\<^sup>* b a \ accp r a \ accp r b" by (erule rtranclp_induct) (blast dest: accp_downward)+ theorem accp_downwards: "accp r a \ r\<^sup>*\<^sup>* b a \ accp r b" by (blast dest: accp_downwards_aux) theorem accp_wfPI: "\x. accp r x \ wfP r" apply (rule wfPUNIVI) apply (rule_tac P = P in accp_induct) apply blast+ done theorem accp_wfPD: "wfP r \ accp r x" apply (erule wfP_induct_rule) apply (rule accp.accI) apply blast done theorem wfP_accp_iff: "wfP r = (\x. accp r x)" by (blast intro: accp_wfPI dest: accp_wfPD) text \Smaller relations have bigger accessible parts:\ lemma accp_subset: assumes "R1 \ R2" shows "accp R2 \ accp R1" proof (rule predicate1I) fix x assume "accp R2 x" then show "accp R1 x" proof (induct x) fix x assume "\y. R2 y x \ accp R1 y" with assms show "accp R1 x" by (blast intro: accp.accI) qed qed text \This is a generalized induction theorem that works on subsets of the accessible part.\ lemma accp_subset_induct: assumes subset: "D \ accp R" and dcl: "\x z. D x \ R z x \ D z" and "D x" and istep: "\x. D x \ (\z. R z x \ P z) \ P x" shows "P x" proof - from subset and \D x\ have "accp R x" .. then show "P x" using \D x\ proof (induct x) fix x assume "D x" and "\y. 
R y x \ D y \ P y" with dcl and istep show "P x" by blast qed qed text \Set versions of the above theorems\ lemmas acc_induct = accp_induct [to_set] lemmas acc_induct_rule = acc_induct [rule_format, induct set: acc] lemmas acc_downward = accp_downward [to_set] lemmas not_acc_down = not_accp_down [to_set] lemmas acc_downwards_aux = accp_downwards_aux [to_set] lemmas acc_downwards = accp_downwards [to_set] lemmas acc_wfI = accp_wfPI [to_set] lemmas acc_wfD = accp_wfPD [to_set] lemmas wf_acc_iff = wfP_accp_iff [to_set] lemmas acc_subset = accp_subset [to_set] lemmas acc_subset_induct = accp_subset_induct [to_set] subsection \Tools for building wellfounded relations\ text \Inverse Image\ lemma wf_inv_image [simp,intro!]: fixes f :: "'a \ 'b" assumes "wf r" shows "wf (inv_image r f)" proof (clarsimp simp: inv_image_def wf_eq_minimal) fix P and x::'a assume "x \ P" then obtain w where w: "w \ {w. \x::'a. x \ P \ f x = w}" by auto have *: "\Q u. u \ Q \ \z\Q. \y. (y, z) \ r \ y \ Q" using assms by (auto simp add: wf_eq_minimal) show "\z\P. \y. (f y, f z) \ r \ y \ P" using * [OF w] by auto qed text \Measure functions into \<^typ>\nat\\ definition measure :: "('a \ nat) \ ('a \ 'a) set" where "measure = inv_image less_than" lemma in_measure[simp, code_unfold]: "(x, y) \ measure f \ f x < f y" by (simp add:measure_def) lemma wf_measure [iff]: "wf (measure f)" unfolding measure_def by (rule wf_less_than [THEN wf_inv_image]) lemma wf_if_measure: "(\x. P x \ f(g x) < f x) \ wf {(y,x). P x \ y = g x}" for f :: "'a \ nat" using wf_measure[of f] unfolding measure_def inv_image_def less_than_def less_eq by (rule wf_subset) auto subsubsection \Lexicographic combinations\ definition lex_prod :: "('a \'a) set \ ('b \ 'b) set \ (('a \ 'b) \ ('a \ 'b)) set" (infixr "<*lex*>" 80) where "ra <*lex*> rb = {((a, b), (a', b')). (a, a') \ ra \ a = a' \ (b, b') \ rb}" lemma in_lex_prod[simp]: "((a, b), (a', b')) \ r <*lex*> s \ (a, a') \ r \ a = a' \ (b, b') \ s" by (auto simp:lex_prod_def) lemma wf_lex_prod [intro!]: assumes "wf ra" "wf rb" shows "wf (ra <*lex*> rb)" proof (rule wfI) fix z :: "'a \ 'b" and P assume * [rule_format]: "\u. (\v. (v, u) \ ra <*lex*> rb \ P v) \ P u" obtain x y where zeq: "z = (x,y)" by fastforce have "P(x,y)" using \wf ra\ proof (induction x arbitrary: y rule: wf_induct_rule) case (less x) note lessx = less show ?case using \wf rb\ less proof (induction y rule: wf_induct_rule) case (less y) show ?case by (force intro: * less.IH lessx) qed qed then show "P z" by (simp add: zeq) qed auto text \\<*lex*>\ preserves transitivity\ lemma trans_lex_prod [simp,intro!]: "trans R1 \ trans R2 \ trans (R1 <*lex*> R2)" unfolding trans_def lex_prod_def by blast lemma total_on_lex_prod [simp]: "total_on A r \ total_on B s \ total_on (A \ B) (r <*lex*> s)" by (auto simp: total_on_def) lemma asym_lex_prod: "\asym R; asym S\ \ asym (R <*lex*> S)" by (auto simp add: asym_iff lex_prod_def) lemma total_lex_prod [simp]: "total r \ total s \ total (r <*lex*> s)" by (auto simp: total_on_def) text \lexicographic combinations with measure functions\ definition mlex_prod :: "('a \ nat) \ ('a \ 'a) set \ ('a \ 'a) set" (infixr "<*mlex*>" 80) where "f <*mlex*> R = inv_image (less_than <*lex*> R) (\x. 
(f x, x))" lemma wf_mlex: "wf R \ wf (f <*mlex*> R)" and mlex_less: "f x < f y \ (x, y) \ f <*mlex*> R" and mlex_leq: "f x \ f y \ (x, y) \ R \ (x, y) \ f <*mlex*> R" and mlex_iff: "(x, y) \ f <*mlex*> R \ f x < f y \ f x = f y \ (x, y) \ R" by (auto simp: mlex_prod_def) text \Proper subset relation on finite sets.\ definition finite_psubset :: "('a set \ 'a set) set" where "finite_psubset = {(A, B). A \ B \ finite B}" lemma wf_finite_psubset[simp]: "wf finite_psubset" apply (unfold finite_psubset_def) apply (rule wf_measure [THEN wf_subset]) apply (simp add: measure_def inv_image_def less_than_def less_eq) apply (fast elim!: psubset_card_mono) done lemma trans_finite_psubset: "trans finite_psubset" by (auto simp: finite_psubset_def less_le trans_def) lemma in_finite_psubset[simp]: "(A, B) \ finite_psubset \ A \ B \ finite B" unfolding finite_psubset_def by auto text \max- and min-extension of order to finite sets\ inductive_set max_ext :: "('a \ 'a) set \ ('a set \ 'a set) set" for R :: "('a \ 'a) set" where max_extI[intro]: "finite X \ finite Y \ Y \ {} \ (\x. x \ X \ \y\Y. (x, y) \ R) \ (X, Y) \ max_ext R" lemma max_ext_wf: assumes wf: "wf r" shows "wf (max_ext r)" proof (rule acc_wfI, intro allI) show "M \ acc (max_ext r)" (is "_ \ ?W") for M proof (induct M rule: infinite_finite_induct) case empty show ?case by (rule accI) (auto elim: max_ext.cases) next case (insert a M) from wf \M \ ?W\ \finite M\ show "insert a M \ ?W" proof (induct arbitrary: M) fix M a assume "M \ ?W" assume [intro]: "finite M" assume hyp: "\b M. (b, a) \ r \ M \ ?W \ finite M \ insert b M \ ?W" have add_less: "M \ ?W \ (\y. y \ N \ (y, a) \ r) \ N \ M \ ?W" if "finite N" "finite M" for N M :: "'a set" using that by (induct N arbitrary: M) (auto simp: hyp) show "insert a M \ ?W" proof (rule accI) fix N assume Nless: "(N, insert a M) \ max_ext r" then have *: "\x. x \ N \ (x, a) \ r \ (\y \ M. (x, y) \ r)" by (auto elim!: max_ext.cases) let ?N1 = "{n \ N. (n, a) \ r}" let ?N2 = "{n \ N. (n, a) \ r}" have N: "?N1 \ ?N2 = N" by (rule set_eqI) auto from Nless have "finite N" by (auto elim: max_ext.cases) then have finites: "finite ?N1" "finite ?N2" by auto have "?N2 \ ?W" proof (cases "M = {}") case [simp]: True have Mw: "{} \ ?W" by (rule accI) (auto elim: max_ext.cases) from * have "?N2 = {}" by auto with Mw show "?N2 \ ?W" by (simp only:) next case False from * finites have N2: "(?N2, M) \ max_ext r" by (rule_tac max_extI[OF _ _ \M \ {}\]) auto with \M \ ?W\ show "?N2 \ ?W" by (rule acc_downward) qed with finites have "?N1 \ ?N2 \ ?W" by (rule add_less) simp then show "N \ ?W" by (simp only: N) qed qed next case infinite show ?case by (rule accI) (auto elim: max_ext.cases simp: infinite) qed qed lemma max_ext_additive: "(A, B) \ max_ext R \ (C, D) \ max_ext R \ (A \ C, B \ D) \ max_ext R" by (force elim!: max_ext.cases) definition min_ext :: "('a \ 'a) set \ ('a set \ 'a set) set" where "min_ext r = {(X, Y) | X Y. X \ {} \ (\y \ Y. (\x \ X. (x, y) \ r))}" lemma min_ext_wf: assumes "wf r" shows "wf (min_ext r)" proof (rule wfI_min) show "\m \ Q. (\n. (n, m) \ min_ext r \ n \ Q)" if nonempty: "x \ Q" for Q :: "'a set set" and x proof (cases "Q = {{}}") case True then show ?thesis by (simp add: min_ext_def) next case False with nonempty obtain e x where "x \ Q" "e \ x" by force then have eU: "e \ \Q" by auto with \wf r\ obtain z where z: "z \ \Q" "\y. 
(y, z) \ r \ y \ \Q" by (erule wfE_min) from z obtain m where "m \ Q" "z \ m" by auto from \m \ Q\ show ?thesis proof (intro rev_bexI allI impI) fix n assume smaller: "(n, m) \ min_ext r" with \z \ m\ obtain y where "y \ n" "(y, z) \ r" by (auto simp: min_ext_def) with z(2) show "n \ Q" by auto qed qed qed subsubsection \Bounded increase must terminate\ lemma wf_bounded_measure: fixes ub :: "'a \ nat" and f :: "'a \ nat" assumes "\a b. (b, a) \ r \ ub b \ ub a \ ub a \ f b \ f b > f a" shows "wf r" by (rule wf_subset[OF wf_measure[of "\a. ub a - f a"]]) (auto dest: assms) lemma wf_bounded_set: fixes ub :: "'a \ 'b set" and f :: "'a \ 'b set" assumes "\a b. (b,a) \ r \ finite (ub a) \ ub b \ ub a \ ub a \ f b \ f b \ f a" shows "wf r" apply (rule wf_bounded_measure[of r "\a. card (ub a)" "\a. card (f a)"]) apply (drule assms) apply (blast intro: card_mono finite_subset psubset_card_mono dest: psubset_eq[THEN iffD2]) done lemma finite_subset_wf: assumes "finite A" shows "wf {(X, Y). X \ Y \ Y \ A}" by (rule wf_subset[OF wf_finite_psubset[unfolded finite_psubset_def]]) (auto intro: finite_subset[OF _ assms]) hide_const (open) acc accp end diff --git a/src/Pure/Admin/build_release.scala b/src/Pure/Admin/build_release.scala --- a/src/Pure/Admin/build_release.scala +++ b/src/Pure/Admin/build_release.scala @@ -1,949 +1,952 @@ /* Title: Pure/Admin/build_release.scala Author: Makarius Build full Isabelle distribution from repository. */ package isabelle object Build_Release { /** release context **/ private def execute(dir: Path, script: String): Unit = Isabelle_System.bash(script, cwd = dir.file).check private def execute_tar(dir: Path, args: String, strip: Int = 0): Unit = Isabelle_System.gnutar(args, dir = dir, strip = strip).check private def bash_java_opens(args: String*): String = Bash.strings(args.toList.flatMap(arg => List("--add-opens", arg + "=ALL-UNNAMED"))) object Release_Context { def apply( target_dir: Path, release_name: String = "", components_base: Path = Components.default_components_base, progress: Progress = new Progress): Release_Context = { val date = Date.now() val dist_name = proper_string(release_name) getOrElse ("Isabelle_" + Date.Format.date(date)) val dist_dir = (target_dir + Path.explode("dist-" + dist_name)).absolute new Release_Context(release_name, dist_name, dist_dir, components_base, progress) } } class Release_Context private[Build_Release]( val release_name: String, val dist_name: String, val dist_dir: Path, val components_base: Path, val progress: Progress) { override def toString: String = dist_name val isabelle: Path = Path.explode(dist_name) val isabelle_dir: Path = dist_dir + isabelle val isabelle_archive: Path = dist_dir + isabelle.tar.gz val isabelle_library_archive: Path = dist_dir + Path.explode(dist_name + "_library.tar.gz") def other_isabelle(dir: Path): Other_Isabelle = Other_Isabelle(dir + isabelle, isabelle_identifier = dist_name + "-build", progress = progress) def make_announce(id: String): Unit = { if (release_name.isEmpty) { File.write(isabelle_dir + Path.explode("ANNOUNCE"), """ IMPORTANT NOTE ============== This is a snapshot of Isabelle/""" + id + """ from the repository. """) } } def make_contrib(): Unit = { Isabelle_System.make_directory(Components.contrib(isabelle_dir)) File.write(Components.contrib(isabelle_dir, name = "README"), """This directory contains add-on components that contribute to the main Isabelle distribution. Separate licensing conditions apply, see each directory individually. 
""") } def bundle_info(platform: Platform.Family.Value): Bundle_Info = platform match { case Platform.Family.linux_arm => Bundle_Info(platform, "Linux (ARM)", dist_name + "_linux_arm.tar.gz") case Platform.Family.linux => Bundle_Info(platform, "Linux", dist_name + "_linux.tar.gz") case Platform.Family.macos => Bundle_Info(platform, "macOS", dist_name + "_macos.tar.gz") case Platform.Family.windows => Bundle_Info(platform, "Windows", dist_name + ".exe") } } sealed case class Bundle_Info( platform: Platform.Family.Value, platform_description: String, name: String) { def path: Path = Path.explode(name) } /** release archive **/ val ISABELLE: Path = Path.basic("Isabelle") val ISABELLE_ID: Path = Path.explode("etc/ISABELLE_ID") val ISABELLE_TAGS: Path = Path.explode("etc/ISABELLE_TAGS") val ISABELLE_IDENTIFIER: Path = Path.explode("etc/ISABELLE_IDENTIFIER") object Release_Archive { def make(bytes: Bytes, rename: String = ""): Release_Archive = { Isabelle_System.with_tmp_dir("tmp")(dir => Isabelle_System.with_tmp_file("archive", ext = "tar.gz")(archive_path => { val isabelle_dir = Isabelle_System.make_directory(dir + ISABELLE) Bytes.write(archive_path, bytes) execute_tar(isabelle_dir, "-xzf " + File.bash_path(archive_path), strip = 1) val id = File.read(isabelle_dir + ISABELLE_ID) val tags = File.read(isabelle_dir + ISABELLE_TAGS) val identifier = File.read(isabelle_dir + ISABELLE_IDENTIFIER) val (bytes1, identifier1) = if (rename.isEmpty || rename == identifier) (bytes, identifier) else { File.write(isabelle_dir + ISABELLE_IDENTIFIER, rename) Isabelle_System.move_file(isabelle_dir, dir + Path.basic(rename)) execute_tar(dir, "-czf " + File.bash_path(archive_path) + " " + Bash.string(rename)) (Bytes.read(archive_path), rename) } new Release_Archive(bytes1, id, tags, identifier1) }) ) } def read(path: Path, rename: String = ""): Release_Archive = make(Bytes.read(path), rename = rename) def get(url: String, rename: String = "", progress: Progress = new Progress): Release_Archive = { val bytes = if (Path.is_wellformed(url)) Bytes.read(Path.explode(url)) else Isabelle_System.download(url, progress = progress).bytes make(bytes, rename = rename) } } case class Release_Archive private[Build_Release]( bytes: Bytes, id: String, tags: String, identifier: String) { override def toString: String = identifier } /** generated content **/ /* bundled components */ class Bundled(platform: Option[Platform.Family.Value] = None) { def detect(s: String): Boolean = s.startsWith("#bundled") && !s.startsWith("#bundled ") def apply(name: String): String = "#bundled" + (platform match { case None => "" case Some(plat) => "-" + plat }) + ":" + name private val Pattern1 = ("""^#bundled:(.*)$""").r private val Pattern2 = ("""^#bundled-(.*):(.*)$""").r def unapply(s: String): Option[String] = s match { case Pattern1(name) => Some(name) case Pattern2(Platform.Family(plat), name) if platform == Some(plat) => Some(name) case _ => None } } def record_bundled_components(dir: Path): Unit = { val catalogs = List("main", "bundled").map((_, new Bundled())) ::: Platform.Family.list.flatMap(platform => List(platform.toString, "bundled-" + platform.toString). 
map((_, new Bundled(platform = Some(platform))))) File.append(Components.components(dir), terminate_lines("#bundled components" :: (for { (catalog, bundled) <- catalogs.iterator path = Components.admin(dir) + Path.basic(catalog) if path.is_file line <- split_lines(File.read(path)) if line.nonEmpty && !line.startsWith("#") } yield bundled(line)).toList)) } def get_bundled_components(dir: Path, platform: Platform.Family.Value): (List[String], String) = { val Bundled = new Bundled(platform = Some(platform)) val components = for { Bundled(name) <- Components.read_components(dir) } yield name val jdk_component = components.find(_.startsWith("jdk")) getOrElse error("Missing jdk component") (components, jdk_component) } def activate_components( dir: Path, platform: Platform.Family.Value, more_names: List[String]): Unit = { def contrib_name(name: String): String = Components.contrib(name = name).implode val Bundled = new Bundled(platform = Some(platform)) Components.write_components(dir, Components.read_components(dir).flatMap(line => line match { case Bundled(name) => if (Components.check_dir(Components.contrib(dir, name))) Some(contrib_name(name)) else None case _ => if (Bundled.detect(line)) None else Some(line) }) ::: more_names.map(contrib_name)) } /** build release **/ /* build heaps */ private def build_heaps( options: Options, platform: Platform.Family.Value, build_sessions: List[String], local_dir: Path): Unit = { val server_option = "build_host_" + platform.toString val ssh = options.string(server_option) match { case "" => if (Platform.family == platform) SSH.Local else error("Undefined option " + server_option + ": cannot build heaps") case SSH.Target(user, host) => SSH.open_session(options, host = host, user = user) case s => error("Malformed option " + server_option + ": " + quote(s)) } try { Isabelle_System.with_tmp_file("tmp", ext = "tar")(local_tmp_tar => { execute_tar(local_dir, "-cf " + File.bash_path(local_tmp_tar) + " .") ssh.with_tmp_dir(remote_dir => { val remote_tmp_tar = remote_dir + Path.basic("tmp.tar") ssh.write_file(remote_tmp_tar, local_tmp_tar) val remote_commands = List( "cd " + File.bash_path(remote_dir), "tar -xf tmp.tar", "bin/isabelle build -o system_heaps -b -- " + Bash.strings(build_sessions), "tar -cf tmp.tar heaps") ssh.execute(remote_commands.mkString(" && "), settings = false).check ssh.read_file(remote_tmp_tar, local_tmp_tar) }) execute_tar(local_dir, "-xf " + File.bash_path(local_tmp_tar)) }) } finally { ssh.close() } } /* Isabelle application */ def make_isabelle_options(path: Path, options: List[String], line_ending: String = "\n"): Unit = { val title = "# Java runtime options" File.write(path, (title :: options).map(_ + line_ending).mkString) } def make_isabelle_app( platform: Platform.Family.Value, isabelle_target: Path, isabelle_name: String, jdk_component: String, classpath: List[Path], dock_icon: Boolean = false): Unit = { val script = """#!/usr/bin/env bash # # Author: Makarius # # Main Isabelle application script. # minimal Isabelle environment ISABELLE_HOME="$(cd "$(dirname "$0")"; cd "$(pwd -P)/../.."; pwd)" source "$ISABELLE_HOME/lib/scripts/isabelle-platform" #paranoia settings -- avoid intrusion of alien options unset "_JAVA_OPTIONS" unset "JAVA_TOOL_OPTIONS" #paranoia settings -- avoid problems of Java/Swing versus XIM/IBus etc. 
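# (XMODIFIERS selects an X11 input method; Java/Swing is known to misbehave
# under XIM, so the variable is cleared before the JVM starts)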
unset XMODIFIERS COMPONENT="$ISABELLE_HOME/contrib/""" + jdk_component + """" source "$COMPONENT/etc/settings" # main declare -a JAVA_OPTIONS=($(grep -v '^#' "$ISABELLE_HOME/Isabelle.options")) "$ISABELLE_HOME/bin/isabelle" env "$ISABELLE_HOME/lib/scripts/java-gui-setup" exec "$ISABELLE_JDK_HOME/bin/java" \ "-Disabelle.root=$ISABELLE_HOME" "${JAVA_OPTIONS[@]}" \ -classpath """" + classpath.map(p => "$ISABELLE_HOME/" + p.implode).mkString(":") + """" \ "-splash:$ISABELLE_HOME/lib/logo/isabelle.gif" \ """ + (if (dock_icon) """"-Xdock:icon=$ISABELLE_HOME/lib/logo/isabelle_transparent-128.png" \ """ else "") + """isabelle.jedit.Main "$@" """ val script_path = isabelle_target + Path.explode("lib/scripts/Isabelle_app") File.write(script_path, script) File.set_executable(script_path, true) val component_dir = isabelle_target + Path.explode("contrib/Isabelle_app") Isabelle_System.move_file( component_dir + Path.explode(Platform.Family.standard(platform)) + Path.explode("Isabelle"), isabelle_target + Path.explode(isabelle_name)) Isabelle_System.rm_tree(component_dir) } def make_isabelle_plist(path: Path, isabelle_name: String, isabelle_rev: String): Unit = { File.write(path, """ CFBundleDevelopmentRegion English CFBundleIconFile isabelle.icns CFBundleIdentifier de.tum.in.isabelle CFBundleDisplayName """ + isabelle_name + """ CFBundleInfoDictionaryVersion 6.0 CFBundleName """ + isabelle_name + """ CFBundlePackageType APPL CFBundleShortVersionString """ + isabelle_name + """ CFBundleSignature ???? CFBundleVersion """ + isabelle_rev + """ NSHumanReadableCopyright LSMinimumSystemVersion 10.11 LSApplicationCategoryType public.app-category.developer-tools NSHighResolutionCapable true NSSupportsAutomaticGraphicsSwitching true CFBundleDocumentTypes CFBundleTypeExtensions thy CFBundleTypeIconFile theory.icns CFBundleTypeName Isabelle theory file CFBundleTypeRole Editor LSTypeIsPackage """) } /* main */ def use_release_archive( context: Release_Context, archive: Release_Archive, id: String = ""): Unit = { if (id.nonEmpty && id != archive.id) { error("Mismatch of release identification " + id + " vs. archive " + archive.id) } if (!context.isabelle_archive.is_file || Bytes.read(context.isabelle_archive) != archive.bytes) { Bytes.write(context.isabelle_archive, archive.bytes) } } def build_release_archive( context: Release_Context, version: String, parallel_jobs: Int = 1): Unit = { val progress = context.progress val hg = Mercurial.repository(Path.ISABELLE_HOME) val id = try { hg.id(version) } catch { case ERROR(msg) => cat_error("Bad repository version: " + version, msg) } if (context.isabelle_archive.is_file) { progress.echo_warning("Found existing release archive: " + context.isabelle_archive) use_release_archive(context, Release_Archive.read(context.isabelle_archive), id = id) } else { progress.echo_warning("Preparing release " + context.dist_name + " ...") Isabelle_System.new_directory(context.dist_dir) hg.archive(context.isabelle_dir.expand.implode, rev = id, options = "--type files") for (name <- List(".hg_archival.txt", ".hgtags", ".hgignore", "README_REPOSITORY")) { (context.isabelle_dir + Path.explode(name)).file.delete } File.write(context.isabelle_dir + ISABELLE_ID, id) File.write(context.isabelle_dir + ISABELLE_TAGS, hg.tags(rev = id)) File.write(context.isabelle_dir + ISABELLE_IDENTIFIER, context.dist_name) context.make_announce(id) context.make_contrib() execute(context.isabelle_dir, """find . 
-print | xargs chmod -f u+rw""") record_bundled_components(context.isabelle_dir) /* build tools and documentation */ val other_isabelle = context.other_isabelle(context.dist_dir) other_isabelle.init_settings( other_isabelle.init_components( components_base = context.components_base, catalogs = List("main"))) other_isabelle.resolve_components(echo = true) try { other_isabelle.bash( "export CLASSPATH=" + Bash.string(other_isabelle.getenv("ISABELLE_CLASSPATH")) + "\n" + "bin/isabelle jedit -b", echo = true).check } catch { case ERROR(msg) => cat_error("Failed to build tools:", msg) } try { other_isabelle.bash( "bin/isabelle build_doc -a -o system_heaps -j " + parallel_jobs, echo = true).check } catch { case ERROR(msg) => cat_error("Failed to build documentation:", msg) } other_isabelle.make_news() for (name <- List("Admin", "browser_info", "heaps")) { Isabelle_System.rm_tree(other_isabelle.isabelle_home + Path.explode(name)) } other_isabelle.cleanup() progress.echo_warning("Creating release archive " + context.isabelle_archive + " ...") execute(context.dist_dir, """chmod -R a+r . && chmod -R u+w . && chmod -R g=o .""") execute(context.dist_dir, """find . -type f "(" -name "*.thy" -o -name "*.ML" -o -name "*.scala" ")" -print | xargs chmod -f u-w""") execute_tar(context.dist_dir, "-czf " + File.bash_path(context.isabelle_archive) + " " + Bash.string(context.dist_name)) } } def default_platform_families: List[Platform.Family.Value] = Platform.Family.list0 def build_release( options: Options, context: Release_Context, afp_rev: String = "", platform_families: List[Platform.Family.Value] = default_platform_families, more_components: List[Path] = Nil, website: Option[Path] = None, build_sessions: List[String] = Nil, build_library: Boolean = false, parallel_jobs: Int = 1): Unit = { val progress = context.progress /* release directory */ val archive = Release_Archive.read(context.isabelle_archive) for (path <- List(context.isabelle, ISABELLE)) { Isabelle_System.rm_tree(context.dist_dir + path) } Isabelle_System.with_tmp_file("archive", ext = "tar.gz")(archive_path => { Bytes.write(archive_path, archive.bytes) val extract = List("README", "NEWS", "ANNOUNCE", "COPYRIGHT", "CONTRIBUTORS", "doc"). 
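// archive members live under the dist_name top-level directory, hence the prefix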
map(name => context.dist_name + "/" + name) execute_tar(context.dist_dir, "-xzf " + File.bash_path(archive_path) + " " + Bash.strings(extract)) }) Isabelle_System.symlink(Path.explode(context.dist_name), context.dist_dir + ISABELLE) /* make application bundles */ val bundle_infos = platform_families.map(context.bundle_info) for (bundle_info <- bundle_infos) { val isabelle_name = context.dist_name val platform = bundle_info.platform progress.echo("\nApplication bundle for " + platform) Isabelle_System.with_tmp_dir("build_release")(tmp_dir => { // release archive execute_tar(tmp_dir, "-xzf " + File.bash_path(context.isabelle_archive)) val other_isabelle = context.other_isabelle(tmp_dir) val isabelle_target = other_isabelle.isabelle_home // bundled components progress.echo("Bundled components:") val contrib_dir = Components.contrib(isabelle_target) val (bundled_components, jdk_component) = get_bundled_components(isabelle_target, platform) Components.resolve(context.components_base, bundled_components, target_dir = Some(contrib_dir), copy_dir = Some(context.dist_dir + Path.explode("contrib")), progress = progress) val more_components_names = more_components.map(Components.unpack(contrib_dir, _, progress = progress)) activate_components(isabelle_target, platform, more_components_names) // Java parameters val java_options: List[String] = (for { variable <- List( "ISABELLE_JAVA_SYSTEM_OPTIONS", "JEDIT_JAVA_SYSTEM_OPTIONS", "JEDIT_JAVA_OPTIONS") opt <- Word.explode(other_isabelle.getenv(variable)) } yield { val s = "-Dapple.awt.application.name=" if (opt.startsWith(s)) s + isabelle_name else opt }) ::: List("-Disabelle.jedit_server=" + isabelle_name) val classpath: List[Path] = { val base = isabelle_target.absolute val classpath1 = Path.split(other_isabelle.getenv("ISABELLE_CLASSPATH")) val classpath2 = Path.split(other_isabelle.getenv("ISABELLE_SETUP_CLASSPATH")) (classpath1 ::: classpath2).map(path => { val abs_path = path.absolute File.relative_path(base, abs_path) match { case Some(rel_path) => rel_path case None => error("Bad classpath element: " + abs_path) } }) } val jedit_options = Path.explode("src/Tools/jEdit/etc/options") val jedit_props = Path.explode(other_isabelle.getenv("JEDIT_HOME") + "/properties/jEdit.props") // build heaps if (build_sessions.nonEmpty) { progress.echo("Building heaps " + commas_quote(build_sessions) + " ...") build_heaps(options, platform, build_sessions, isabelle_target) } // application bundling Components.purge(contrib_dir, platform) platform match { case Platform.Family.linux_arm | Platform.Family.linux => File.change(isabelle_target + jedit_options, _.replaceAll("jedit_reset_font_size : int =.*", "jedit_reset_font_size : int = 24")) File.change(isabelle_target + jedit_props, _.replaceAll("console.fontsize=.*", "console.fontsize=18") .replaceAll("helpviewer.fontsize=.*", "helpviewer.fontsize=18") .replaceAll("metal.primary.fontsize=.*", "metal.primary.fontsize=18") .replaceAll("metal.secondary.fontsize=.*", "metal.secondary.fontsize=18") .replaceAll("view.fontsize=.*", "view.fontsize=24") .replaceAll("view.gutter.fontsize=.*", "view.gutter.fontsize=16")) make_isabelle_options( isabelle_target + Path.explode("Isabelle.options"), java_options) make_isabelle_app(platform, isabelle_target, isabelle_name, jdk_component, classpath) progress.echo("Packaging " + bundle_info.name + " ...") execute_tar(tmp_dir, "-czf " + File.bash_path(context.dist_dir + bundle_info.path) + " " + Bash.string(isabelle_name)) case Platform.Family.macos => File.change(isabelle_target + 
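// macOS: adjust jEdit shortcuts that would otherwise clash with system key bindings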
jedit_props, _.replaceAll("delete-line.shortcut=.*", "delete-line.shortcut=C+d") .replaceAll("delete.shortcut2=.*", "delete.shortcut2=A+d")) // macOS application bundle val app_contents = isabelle_target + Path.explode("Contents") for (icon <- List("lib/logo/isabelle.icns", "lib/logo/theory.icns")) { Isabelle_System.copy_file(isabelle_target + Path.explode(icon), Isabelle_System.make_directory(app_contents + Path.explode("Resources"))) } make_isabelle_plist( app_contents + Path.explode("Info.plist"), isabelle_name, archive.id) make_isabelle_app(platform, isabelle_target, isabelle_name, jdk_component, classpath, dock_icon = true) val isabelle_options = Path.explode("Isabelle.options") make_isabelle_options( isabelle_target + isabelle_options, java_options ::: List("-Disabelle.app=true")) // application archive progress.echo("Packaging " + bundle_info.name + " ...") val isabelle_app = Path.explode(isabelle_name + ".app") Isabelle_System.move_file(tmp_dir + Path.explode(isabelle_name), tmp_dir + isabelle_app) execute_tar(tmp_dir, "-czf " + File.bash_path(context.dist_dir + bundle_info.path) + " " + File.bash_path(isabelle_app)) case Platform.Family.windows => File.change(isabelle_target + jedit_props, _.replaceAll("foldPainter=.*", "foldPainter=Square")) // application launcher Isabelle_System.move_file(isabelle_target + Path.explode("contrib/windows_app"), tmp_dir) val app_template = Path.explode("~~/Admin/Windows/launch4j") make_isabelle_options( isabelle_target + Path.explode(isabelle_name + ".l4j.ini"), java_options, line_ending = "\r\n") val isabelle_xml = Path.explode("isabelle.xml") val isabelle_exe = bundle_info.path File.write(tmp_dir + isabelle_xml, File.read(app_template + isabelle_xml) .replace("{ISABELLE_NAME}", isabelle_name) .replace("{OUTFILE}", File.platform_path(isabelle_target + isabelle_exe)) .replace("{ICON}", File.platform_path(app_template + Path.explode("isabelle_transparent.ico"))) .replace("{SPLASH}", File.platform_path(app_template + Path.explode("isabelle.bmp"))) .replace("{CLASSPATH}", cat_lines(classpath.map(cp => " %EXEDIR%\\" + File.platform_path(cp).replace('/', '\\') + ""))) .replace("\\jdk\\", "\\" + jdk_component + "\\")) val java_opts = bash_java_opens( "java.base/java.io", "java.base/java.lang", "java.base/java.lang.reflect", "java.base/java.text", "java.base/java.util", "java.desktop/java.awt.font") val launch4j_jar = Path.explode("windows_app/launch4j-" + Platform.family + "/launch4j.jar") execute(tmp_dir, cat_lines(List( "export LAUNCH4J=" + File.bash_platform_path(launch4j_jar), "isabelle java " + java_opts + " -jar \"$LAUNCH4J\" isabelle.xml"))) Isabelle_System.copy_file(app_template + Path.explode("manifest.xml"), isabelle_target + isabelle_exe.ext("manifest")) // Cygwin setup val cygwin_template = Path.explode("~~/Admin/Windows/Cygwin") Isabelle_System.copy_file(cygwin_template + Path.explode("Cygwin-Terminal.bat"), isabelle_target) val cygwin_mirror = File.read(isabelle_target + Path.explode("contrib/cygwin/isabelle/cygwin_mirror")) val cygwin_bat = Path.explode("Cygwin-Setup.bat") File.write(isabelle_target + cygwin_bat, File.read(cygwin_template + cygwin_bat).replace("{MIRROR}", cygwin_mirror)) File.set_executable(isabelle_target + cygwin_bat, true) for (name <- List("isabelle/postinstall", "isabelle/rebaseall")) { val path = Path.explode(name) Isabelle_System.copy_file(cygwin_template + path, isabelle_target + Path.explode("contrib/cygwin") + path) } execute(isabelle_target, """find . 
-type f -not -name "*.exe" -not -name "*.dll" """ + (if (Platform.is_macos) "-perm +100" else "-executable") + " -print0 > contrib/cygwin/isabelle/executables") execute(isabelle_target, """find . -type l -exec echo "{}" ";" -exec readlink "{}" ";" """ + """> contrib/cygwin/isabelle/symlinks""") execute(isabelle_target, """find . -type l -exec rm "{}" ";" """) File.write(isabelle_target + Path.explode("contrib/cygwin/isabelle/uninitialized"), "") // executable archive (self-extracting 7z) val archive_name = isabelle_name + ".7z" val exe_archive = tmp_dir + Path.explode(archive_name) exe_archive.file.delete progress.echo("Packaging " + archive_name + " ...") execute(tmp_dir, "7z -y -bd a " + File.bash_path(exe_archive) + " " + Bash.string(isabelle_name)) if (!exe_archive.is_file) error("Failed to create archive: " + exe_archive) val sfx_exe = tmp_dir + Path.explode("windows_app/7zsd_All_x64.sfx") val sfx_txt = File.read(Path.explode("~~/Admin/Windows/Installer/sfx.txt")) .replace("{ISABELLE_NAME}", isabelle_name) Bytes.write(context.dist_dir + isabelle_exe, Bytes.read(sfx_exe) + Bytes(sfx_txt) + Bytes.read(exe_archive)) File.set_executable(context.dist_dir + isabelle_exe, true) } }) progress.echo("DONE") } /* minimal website */ for (dir <- website) { val website_platform_bundles = for { bundle_info <- bundle_infos if (context.dist_dir + bundle_info.path).is_file } yield (bundle_info.name, bundle_info) val isabelle_link = HTML.link(Isabelle_System.isabelle_repository.changeset(archive.id), HTML.text("Isabelle/" + archive.id)) val afp_link = HTML.link(Isabelle_System.afp_repository.changeset(afp_rev), HTML.text("AFP/" + afp_rev)) HTML.write_document(dir, "index.html", List(HTML.title(context.dist_name)), List( HTML.section(context.dist_name), HTML.subsection("Downloads"), HTML.itemize( List(HTML.link(context.dist_name + ".tar.gz", HTML.text("Source archive"))) :: website_platform_bundles.map({ case (bundle, bundle_info) => List(HTML.link(bundle, HTML.text(bundle_info.platform_description + " bundle"))) })), HTML.subsection("Repositories"), HTML.itemize( List(List(isabelle_link)) ::: (if (afp_rev == "") Nil else List(List(afp_link)))))) Isabelle_System.copy_file(context.isabelle_archive, dir) for ((bundle, _) <- website_platform_bundles) { Isabelle_System.copy_file(context.dist_dir + Path.explode(bundle), dir) } } /* HTML library */ if (build_library) { if (context.isabelle_library_archive.is_file) { progress.echo_warning("Library archive already exists: " + context.isabelle_library_archive) } else { Isabelle_System.with_tmp_dir("build_release")(tmp_dir => { val bundle = context.dist_dir + Path.explode(context.dist_name + "_" + Platform.family + ".tar.gz") execute_tar(tmp_dir, "-xzf " + File.bash_path(bundle)) val other_isabelle = context.other_isabelle(tmp_dir) + Isabelle_System.make_directory(other_isabelle.etc) + File.write(other_isabelle.etc_settings, "ML_OPTIONS=\"--minheap 1000 --maxheap 4000\"\n") + other_isabelle.bash("bin/isabelle build -f -j " + parallel_jobs + " -o browser_info -o document=pdf -o document_variants=document:outline=/proof,/ML" + " -o system_heaps -c -a -d '~~/src/Benchmarks'", echo = true).check other_isabelle.isabelle_home_user.file.delete execute(tmp_dir, "chmod -R a+r " + Bash.string(context.dist_name)) execute(tmp_dir, "chmod -R g=o " + Bash.string(context.dist_name)) execute_tar(tmp_dir, "-czf " + File.bash_path(context.isabelle_library_archive) + " " + Bash.string(context.dist_name + "/browser_info")) }) } } } /** command line entry point **/ def main(args: 
Array[String]): Unit = { Command_Line.tool { var afp_rev = "" var components_base: Path = Components.default_components_base var target_dir = Path.current var release_name = "" var source_archive = "" var website: Option[Path] = None var build_sessions: List[String] = Nil var more_components: List[Path] = Nil var parallel_jobs = 1 var build_library = false var options = Options.init() var platform_families = default_platform_families var rev = "" val getopts = Getopts(""" Usage: Admin/build_release [OPTIONS] Options are: -A REV corresponding AFP changeset id -C DIR base directory for Isabelle components (default: """ + Components.default_components_base + """) -D DIR target directory (default ".") -R RELEASE explicit release name -S ARCHIVE use existing source archive (file or URL) -W WEBSITE produce minimal website in given directory -b SESSIONS build platform-specific session images (separated by commas) -c ARCHIVE clean bundling with additional component .tar.gz archive -j INT maximum number of parallel jobs (default 1) -l build library -o OPTION override Isabelle system OPTION (via NAME=VAL or NAME) -p NAMES platform families (default: """ + default_platform_families.mkString(",") + """) -r REV Mercurial changeset id (default: ARCHIVE or RELEASE or tip) Build Isabelle release in base directory, using the local repository clone. """, "A:" -> (arg => afp_rev = arg), "C:" -> (arg => components_base = Path.explode(arg)), "D:" -> (arg => target_dir = Path.explode(arg)), "R:" -> (arg => release_name = arg), "S:" -> (arg => source_archive = arg), "W:" -> (arg => website = Some(Path.explode(arg))), "b:" -> (arg => build_sessions = space_explode(',', arg)), "c:" -> (arg => { val path = Path.explode(arg) Components.Archive.get_name(path.file_name) more_components = more_components ::: List(path) }), "j:" -> (arg => parallel_jobs = Value.Int.parse(arg)), "l" -> (_ => build_library = true), "o:" -> (arg => options = options + arg), "p:" -> (arg => platform_families = space_explode(',', arg).map(Platform.Family.parse)), "r:" -> (arg => rev = arg)) val more_args = getopts(args) if (more_args.nonEmpty) getopts.usage() if (platform_families.contains(Platform.Family.windows) && !Isabelle_System.bash("7z i").ok) error("Building for windows requires 7z") val progress = new Console_Progress() def make_context(name: String): Release_Context = Release_Context(target_dir, release_name = name, components_base = components_base, progress = progress) val context = if (source_archive.isEmpty) { val context = make_context(release_name) val version = proper_string(rev) orElse proper_string(release_name) getOrElse "tip" build_release_archive(context, version, parallel_jobs = parallel_jobs) context } else { val archive = Release_Archive.get(source_archive, rename = release_name, progress = progress) val context = make_context(archive.identifier) Isabelle_System.make_directory(context.dist_dir) use_release_archive(context, archive, id = rev) context } build_release(options, context, afp_rev = afp_rev, platform_families = platform_families, more_components = more_components, build_sessions = build_sessions, build_library = build_library, parallel_jobs = parallel_jobs, website = website) } } } diff --git a/src/Pure/Admin/build_status.scala b/src/Pure/Admin/build_status.scala --- a/src/Pure/Admin/build_status.scala +++ b/src/Pure/Admin/build_status.scala @@ -1,625 +1,625 @@ /* Title: Pure/Admin/build_status.scala Author: Makarius Present recent build status information from database. 
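Reads per-session timing and ML statistics via the build_log SQL interface and
generates static HTML pages with gnuplot and JFreeChart images.

Illustrative invocation (options as documented in the usage text below;
the target directory is an arbitrary example):

  isabelle build_status -D /tmp/build_status -l 60 -s 1000x750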
*/ package isabelle object Build_Status { /* defaults */ val default_target_dir = Path.explode("build_status") val default_image_size = (800, 600) val default_history = 30 def default_profiles: List[Profile] = Jenkins.build_status_profiles ::: Isabelle_Cronjob.build_status_profiles /* data profiles */ sealed case class Profile( description: String, history: Int = 0, afp: Boolean = false, bulky: Boolean = false, sql: String) { def days(options: Options): Int = options.int("build_log_history") max history def stretch(options: Options): Double = (days(options) max default_history min (default_history * 5)).toDouble / default_history def select(options: Options, columns: List[SQL.Column], only_sessions: Set[String]): SQL.Source = { Build_Log.Data.universal_table.select(columns, distinct = true, sql = "WHERE " + Build_Log.Data.pull_date(afp) + " > " + Build_Log.Data.recent_time(days(options)) + " AND " + SQL.member(Build_Log.Data.status.ident, List( Build_Log.Session_Status.finished.toString, Build_Log.Session_Status.failed.toString)) + (if (only_sessions.isEmpty) "" else " AND " + SQL.member(Build_Log.Data.session_name.ident, only_sessions)) + " AND " + SQL.enclose(sql)) } } /* build status */ def build_status(options: Options, progress: Progress = new Progress, profiles: List[Profile] = default_profiles, only_sessions: Set[String] = Set.empty, verbose: Boolean = false, target_dir: Path = default_target_dir, ml_statistics: Boolean = false, image_size: (Int, Int) = default_image_size): Unit = { val ml_statistics_domain = Iterator(ML_Statistics.heap_fields, ML_Statistics.program_fields, ML_Statistics.tasks_fields, ML_Statistics.workers_fields).flatMap(_._2).toSet val data = read_data(options, progress = progress, profiles = profiles, only_sessions = only_sessions, verbose = verbose, ml_statistics = ml_statistics, ml_statistics_domain = ml_statistics_domain) present_data(data, progress = progress, target_dir = target_dir, image_size = image_size) } /* read data */ sealed case class Data(date: Date, entries: List[Data_Entry]) sealed case class Data_Entry( name: String, hosts: List[String], stretch: Double, sessions: List[Session]) { def failed_sessions: List[Session] = sessions.filter(_.head.failed).sortBy(_.name) } sealed case class Session( name: String, threads: Int, entries: List[Entry], ml_statistics: ML_Statistics, ml_statistics_date: Long) { require(entries.nonEmpty, "no entries") lazy val sorted_entries: List[Entry] = entries.sortBy(entry => - entry.date) def head: Entry = sorted_entries.head def order: Long = - head.timing.elapsed.ms def finished_entries: List[Entry] = sorted_entries.filter(_.finished) def finished_entries_size: Int = finished_entries.map(_.date).toSet.size def check_timing: Boolean = finished_entries_size >= 3 def check_heap: Boolean = finished_entries_size >= 3 && finished_entries.forall(entry => entry.maximum_heap > 0 || entry.average_heap > 0 || entry.stored_heap > 0) def make_csv: CSV.File = { val header = List("session_name", "chapter", "pull_date", "afp_pull_date", "isabelle_version", "afp_version", "timing_elapsed", "timing_cpu", "timing_gc", "ml_timing_elapsed", "ml_timing_cpu", "ml_timing_gc", "maximum_code", "average_code", "maximum_stack", "average_stack", "maximum_heap", "average_heap", "stored_heap", "status") val date_format = Date.Format("uuuu-MM-dd HH:mm:ss") val records = for (entry <- sorted_entries) yield { CSV.Record(name, entry.chapter, date_format(entry.pull_date), entry.afp_pull_date match { case Some(date) => date_format(date) case None => "" }, 
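// remaining columns follow the header order declared above:
// versions, timings, memory figures, and session status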
entry.isabelle_version, entry.afp_version, entry.timing.elapsed.ms, entry.timing.cpu.ms, entry.timing.gc.ms, entry.ml_timing.elapsed.ms, entry.ml_timing.cpu.ms, entry.ml_timing.gc.ms, entry.maximum_code, entry.average_code, entry.maximum_stack, entry.average_stack, entry.maximum_heap, entry.average_heap, entry.stored_heap, entry.status) } CSV.File(name, header, records) } } sealed case class Entry( chapter: String, pull_date: Date, afp_pull_date: Option[Date], isabelle_version: String, afp_version: String, timing: Timing, ml_timing: Timing, maximum_code: Long, average_code: Long, maximum_stack: Long, average_stack: Long, maximum_heap: Long, average_heap: Long, stored_heap: Long, status: Build_Log.Session_Status.Value, errors: List[String]) { val date: Long = (afp_pull_date getOrElse pull_date).unix_epoch def finished: Boolean = status == Build_Log.Session_Status.finished def failed: Boolean = status == Build_Log.Session_Status.failed def present_errors(name: String): XML.Body = { if (errors.isEmpty) HTML.text(name + print_version(isabelle_version, afp_version, chapter)) else { HTML.tooltip_errors(HTML.text(name), errors.map(s => HTML.text(Symbol.decode(s)))) :: HTML.text(print_version(isabelle_version, afp_version, chapter)) } } } sealed case class Image(name: String, width: Int, height: Int) { def path: Path = Path.basic(name) } def print_version( isabelle_version: String, afp_version: String = "", chapter: String = AFP.chapter): String = { val body = proper_string(isabelle_version).map("Isabelle/" + _).toList ::: (if (chapter == AFP.chapter) proper_string(afp_version).map("AFP/" + _) else None).toList if (body.isEmpty) "" else body.mkString(" (", ", ", ")") } def read_data(options: Options, progress: Progress = new Progress, profiles: List[Profile] = default_profiles, only_sessions: Set[String] = Set.empty, ml_statistics: Boolean = false, - ml_statistics_domain: String => Boolean = (key: String) => true, + ml_statistics_domain: String => Boolean = _ => true, verbose: Boolean = false): Data = { val date = Date.now() var data_hosts = Map.empty[String, Set[String]] var data_stretch = Map.empty[String, Double] var data_entries = Map.empty[String, Map[String, Session]] def get_hosts(data_name: String): Set[String] = data_hosts.getOrElse(data_name, Set.empty) val store = Build_Log.store(options) using(store.open_database())(db => { for (profile <- profiles.sortBy(_.description)) { progress.echo("input " + quote(profile.description)) val afp = profile.afp val columns = List( Build_Log.Data.pull_date(afp = false), Build_Log.Data.pull_date(afp = true), Build_Log.Prop.build_host, Build_Log.Prop.isabelle_version, Build_Log.Prop.afp_version, Build_Log.Settings.ISABELLE_BUILD_OPTIONS, Build_Log.Settings.ML_PLATFORM, Build_Log.Data.session_name, Build_Log.Data.chapter, Build_Log.Data.groups, Build_Log.Data.threads, Build_Log.Data.timing_elapsed, Build_Log.Data.timing_cpu, Build_Log.Data.timing_gc, Build_Log.Data.ml_timing_elapsed, Build_Log.Data.ml_timing_cpu, Build_Log.Data.ml_timing_gc, Build_Log.Data.heap_size, Build_Log.Data.status, Build_Log.Data.errors) ::: (if (ml_statistics) List(Build_Log.Data.ml_statistics) else Nil) val Threads_Option = """threads\s*=\s*(\d+)""".r val sql = profile.select(options, columns, only_sessions) progress.echo_if(verbose, sql) db.using_statement(sql)(stmt => { val res = stmt.execute_query() while (res.next()) { val session_name = res.string(Build_Log.Data.session_name) val chapter = res.string(Build_Log.Data.chapter) val groups = 
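// session groups are stored newline-separated in the log database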
split_lines(res.string(Build_Log.Data.groups)) val threads = { val threads1 = res.string(Build_Log.Settings.ISABELLE_BUILD_OPTIONS) match { case Threads_Option(Value.Int(i)) => i case _ => 1 } val threads2 = res.get_int(Build_Log.Data.threads).getOrElse(1) threads1 max threads2 } val ml_platform = res.string(Build_Log.Settings.ML_PLATFORM) val ml_platform_64 = ml_platform.startsWith("x86_64-") || ml_platform.startsWith("arm64-") val data_name = profile.description + (if (ml_platform_64) ", 64bit" else "") + (if (threads == 1) "" else ", " + threads + " threads") res.get_string(Build_Log.Prop.build_host).foreach(host => data_hosts += (data_name -> (get_hosts(data_name) + host))) data_stretch += (data_name -> profile.stretch(options)) val isabelle_version = res.string(Build_Log.Prop.isabelle_version) val afp_version = res.string(Build_Log.Prop.afp_version) val ml_stats = ML_Statistics( if (ml_statistics) { Properties.uncompress(res.bytes(Build_Log.Data.ml_statistics), cache = store.cache) } else Nil, domain = ml_statistics_domain, heading = session_name + print_version(isabelle_version, afp_version, chapter)) val entry = Entry( chapter = chapter, pull_date = res.date(Build_Log.Data.pull_date(afp = false)), afp_pull_date = if (afp) res.get_date(Build_Log.Data.pull_date(afp = true)) else None, isabelle_version = isabelle_version, afp_version = afp_version, timing = res.timing( Build_Log.Data.timing_elapsed, Build_Log.Data.timing_cpu, Build_Log.Data.timing_gc), ml_timing = res.timing( Build_Log.Data.ml_timing_elapsed, Build_Log.Data.ml_timing_cpu, Build_Log.Data.ml_timing_gc), maximum_code = ml_stats.maximum(ML_Statistics.CODE_SIZE).toLong, average_code = ml_stats.average(ML_Statistics.CODE_SIZE).toLong, maximum_stack = ml_stats.maximum(ML_Statistics.STACK_SIZE).toLong, average_stack = ml_stats.average(ML_Statistics.STACK_SIZE).toLong, maximum_heap = ml_stats.maximum(ML_Statistics.HEAP_SIZE).toLong, average_heap = ml_stats.average(ML_Statistics.HEAP_SIZE).toLong, stored_heap = ML_Statistics.mem_scale(res.long(Build_Log.Data.heap_size)), status = Build_Log.Session_Status.withName(res.string(Build_Log.Data.status)), errors = Build_Log.uncompress_errors( res.bytes(Build_Log.Data.errors), cache = store.cache)) val sessions = data_entries.getOrElse(data_name, Map.empty) val session = sessions.get(session_name) match { case None => Session(session_name, threads, List(entry), ml_stats, entry.date) case Some(old) => val (ml_stats1, ml_stats1_date) = if (entry.date > old.ml_statistics_date) (ml_stats, entry.date) else (old.ml_statistics, old.ml_statistics_date) Session(session_name, threads, entry :: old.entries, ml_stats1, ml_stats1_date) } if ((!afp || chapter == AFP.chapter) && (!profile.bulky || groups.exists(AFP.groups_bulky.toSet))) { data_entries += (data_name -> (sessions + (session_name -> session))) } } }) } }) val sorted_entries = (for { (name, sessions) <- data_entries.toList sorted_sessions <- proper_list(sessions.toList.map(_._2).sortBy(_.order)) } yield { val hosts = get_hosts(name).toList.sorted val stretch = data_stretch(name) Data_Entry(name, hosts, stretch, sorted_sessions) }).sortBy(_.name) Data(date, sorted_entries) } /* present data */ def present_data(data: Data, progress: Progress = new Progress, target_dir: Path = default_target_dir, image_size: (Int, Int) = default_image_size): Unit = { def clean_name(name: String): String = name.flatMap(c => if (c == ' ' || c == '/') "_" else if (c == ',') "" else c.toString) HTML.write_document(target_dir, "index.html", 
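// top-level overview: one entry per data profile, with failed sessions highlighted inline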
List(HTML.title("Isabelle build status")), List(HTML.chapter("Isabelle build status"), HTML.par( List(HTML.description( List(HTML.text("status date:") -> HTML.text(data.date.toString))))), HTML.par( - List(HTML.itemize(data.entries.map({ case data_entry => + List(HTML.itemize(data.entries.map(data_entry => List( HTML.link(clean_name(data_entry.name) + "/index.html", HTML.text(data_entry.name))) ::: (data_entry.failed_sessions match { case Nil => Nil case sessions => HTML.break ::: List(HTML.span(HTML.error_message, HTML.text("Failed sessions:"))) ::: List(HTML.itemize(sessions.map(s => s.head.present_errors(s.name)))) }) - })))))) + )))))) for (data_entry <- data.entries) { val data_name = data_entry.name val (image_width, image_height) = image_size val image_width_stretch = (image_width * data_entry.stretch).toInt progress.echo("output " + quote(data_name)) val dir = Isabelle_System.make_directory(target_dir + Path.basic(clean_name(data_name))) val data_files = (for (session <- data_entry.sessions) yield { val csv_file = session.make_csv csv_file.write(dir) session.name -> csv_file }).toMap val session_plots = Par_List.map((session: Session) => Isabelle_System.with_tmp_file(session.name, "data") { data_file => Isabelle_System.with_tmp_file(session.name, "gnuplot") { gnuplot_file => def plot_name(kind: String): String = session.name + "_" + kind + ".png" File.write(data_file, cat_lines( session.finished_entries.map(entry => List(entry.date.toString, entry.timing.elapsed.minutes.toString, entry.timing.resources.minutes.toString, entry.ml_timing.elapsed.minutes.toString, entry.ml_timing.resources.minutes.toString, entry.maximum_code.toString, - entry.maximum_code.toString, + entry.average_code.toString, + entry.maximum_stack.toString, entry.average_stack.toString, - entry.maximum_stack.toString, - entry.average_heap.toString, + entry.maximum_heap.toString, entry.average_heap.toString, entry.stored_heap.toString).mkString(" ")))) val max_time = (session.finished_entries.foldLeft(0.0) { case (m, entry) => m.max(entry.timing.elapsed.minutes). max(entry.timing.resources.minutes). max(entry.ml_timing.elapsed.minutes). 
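// running maximum over all plotted time series; the result is floored
// at 0.1 and scaled by 1.1 to give the y-axis some headroom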
max(entry.ml_timing.resources.minutes) } max 0.1) * 1.1 val timing_range = "[0:" + max_time + "]" def gnuplot(plot_name: String, plots: List[String], range: String): Image = { val image = Image(plot_name, image_width_stretch, image_height) File.write(gnuplot_file, """ set terminal png size """ + image.width + "," + image.height + """ set output """ + quote(File.standard_path(dir + image.path)) + """ set xdata time set timefmt "%s" set format x "%d-%b" set xlabel """ + quote(session.name) + """ noenhanced set key left bottom plot [] """ + range + " " + plots.map(s => quote(data_file.implode) + " " + s).mkString(", ") + "\n") val result = Isabelle_System.bash("\"$ISABELLE_GNUPLOT\" " + File.bash_path(gnuplot_file)) if (!result.ok) result.error("Gnuplot failed for " + data_name + "/" + plot_name).check image } val timing_plots = { val plots1 = List( """ using 1:2 smooth sbezier title "elapsed time (smooth)" """, """ using 1:2 smooth csplines title "elapsed time" """) val plots2 = List( """ using 1:3 smooth sbezier title "cpu time (smooth)" """, """ using 1:3 smooth csplines title "cpu time" """) if (session.threads == 1) plots1 else plots1 ::: plots2 } val ml_timing_plots = List( """ using 1:4 smooth sbezier title "ML elapsed time (smooth)" """, """ using 1:4 smooth csplines title "ML elapsed time" """, """ using 1:5 smooth sbezier title "ML cpu time (smooth)" """, """ using 1:5 smooth csplines title "ML cpu time" """) val heap_plots = List( """ using 1:10 smooth sbezier title "heap maximum (smooth)" """, """ using 1:10 smooth csplines title "heap maximum" """, """ using 1:11 smooth sbezier title "heap average (smooth)" """, """ using 1:11 smooth csplines title "heap average" """, """ using 1:12 smooth sbezier title "heap stored (smooth)" """, """ using 1:12 smooth csplines title "heap stored" """) def jfreechart(plot_name: String, fields: ML_Statistics.Fields): Image = { val image = Image(plot_name, image_width, image_height) val chart = session.ml_statistics.chart( fields._1 + ": " + session.ml_statistics.heading, fields._2) Graphics_File.write_chart_png( (dir + image.path).file, chart, image.width, image.height) image } val images = (if (session.check_timing) List( gnuplot(plot_name("timing"), timing_plots, timing_range), gnuplot(plot_name("ml_timing"), ml_timing_plots, timing_range)) else Nil) ::: (if (session.check_heap) List(gnuplot(plot_name("heap"), heap_plots, "[0:]")) else Nil) ::: (if (session.ml_statistics.content.nonEmpty) List(jfreechart(plot_name("heap_chart"), ML_Statistics.heap_fields), jfreechart(plot_name("program_chart"), ML_Statistics.program_fields)) ::: (if (session.threads > 1) List( jfreechart(plot_name("tasks_chart"), ML_Statistics.tasks_fields), jfreechart(plot_name("workers_chart"), ML_Statistics.workers_fields)) else Nil) else Nil) session.name -> images } }, data_entry.sessions).toMap HTML.write_document(dir, "index.html", List(HTML.title("Isabelle build status for " + data_name)), HTML.chapter("Isabelle build status for " + data_name) :: HTML.par( List(HTML.description( List( HTML.text("status date:") -> HTML.text(data.date.toString), HTML.text("build host:") -> HTML.text(commas(data_entry.hosts)))))) :: HTML.par( List(HTML.itemize( data_entry.sessions.map(session => HTML.link("#session_" + session.name, HTML.text(session.name)) :: HTML.text(" (" + session.head.timing.message_resources + ")"))))) :: data_entry.sessions.flatMap(session => List( HTML.section(HTML.id("session_" + session.name), session.name), HTML.par( HTML.description( List( HTML.text("data:") -> 
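// download link for the per-session CSV file written above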
List(HTML.link(data_files(session.name).file_name, HTML.text("CSV"))), HTML.text("timing:") -> HTML.text(session.head.timing.message_resources), HTML.text("ML timing:") -> HTML.text(session.head.ml_timing.message_resources)) ::: ML_Statistics.mem_print(session.head.maximum_code).map(s => HTML.text("code maximum:") -> HTML.text(s)).toList ::: ML_Statistics.mem_print(session.head.average_code).map(s => HTML.text("code average:") -> HTML.text(s)).toList ::: ML_Statistics.mem_print(session.head.maximum_stack).map(s => HTML.text("stack maximum:") -> HTML.text(s)).toList ::: ML_Statistics.mem_print(session.head.average_stack).map(s => HTML.text("stack average:") -> HTML.text(s)).toList ::: ML_Statistics.mem_print(session.head.maximum_heap).map(s => HTML.text("heap maximum:") -> HTML.text(s)).toList ::: ML_Statistics.mem_print(session.head.average_heap).map(s => HTML.text("heap average:") -> HTML.text(s)).toList ::: ML_Statistics.mem_print(session.head.stored_heap).map(s => HTML.text("heap stored:") -> HTML.text(s)).toList ::: proper_string(session.head.isabelle_version).map(s => HTML.text("Isabelle version:") -> HTML.text(s)).toList ::: proper_string(session.head.afp_version).map(s => HTML.text("AFP version:") -> HTML.text(s)).toList) :: session_plots.getOrElse(session.name, Nil).map(image => HTML.size(image.width / 2, image.height / 2)(HTML.image(image.name))))))) } } /* Isabelle tool wrapper */ val isabelle_tool = Isabelle_Tool("build_status", "present recent build status information from database", Scala_Project.here, args => { var target_dir = default_target_dir var ml_statistics = false var only_sessions = Set.empty[String] var options = Options.init() var image_size = default_image_size var verbose = false val getopts = Getopts(""" Usage: isabelle build_status [OPTIONS] Options are: -D DIR target directory (default """ + default_target_dir + """) -M include full ML statistics -S SESSIONS only given SESSIONS (comma separated) -l DAYS length of relevant history (default """ + options.int("build_log_history") + """) -o OPTION override Isabelle system OPTION (via NAME=VAL or NAME) -s WxH size of PNG image (default """ + image_size._1 + "x" + image_size._2 + """) -v verbose Present performance statistics from build log database, which is specified via system options build_log_database_host, build_log_database_user, build_log_history etc. """, "D:" -> (arg => target_dir = Path.explode(arg)), "M" -> (_ => ml_statistics = true), "S:" -> (arg => only_sessions = space_explode(',', arg).toSet), "l:" -> (arg => options = options + ("build_log_history=" + arg)), "o:" -> (arg => options = options + arg), "s:" -> (arg => - space_explode('x', arg).map(Value.Int.parse(_)) match { + space_explode('x', arg).map(Value.Int.parse) match { case List(w, h) if w > 0 && h > 0 => image_size = (w, h) case _ => error("Error bad PNG image size: " + quote(arg)) }), "v" -> (_ => verbose = true)) val more_args = getopts(args) if (more_args.nonEmpty) getopts.usage() val progress = new Console_Progress build_status(options, progress = progress, only_sessions = only_sessions, verbose = verbose, target_dir = target_dir, ml_statistics = ml_statistics, image_size = image_size) }) } diff --git a/src/Pure/Admin/isabelle_cronjob.scala b/src/Pure/Admin/isabelle_cronjob.scala --- a/src/Pure/Admin/isabelle_cronjob.scala +++ b/src/Pure/Admin/isabelle_cronjob.scala @@ -1,652 +1,652 @@ /* Title: Pure/Admin/isabelle_cronjob.scala Author: Makarius Main entry point for administrative cronjob at TUM. 
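Coordinates logger tasks such as repository synchronization, Mailman archive
downloads, release snapshots, remote build_history runs, and build status reports.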
*/ package isabelle import java.nio.file.Files import scala.annotation.tailrec object Isabelle_Cronjob { /* global resources: owned by main cronjob */ val backup = "lxbroy10:cronjob" val main_dir: Path = Path.explode("~/cronjob") val main_state_file: Path = main_dir + Path.explode("run/main.state") val current_log: Path = main_dir + Path.explode("run/main.log") // owned by log service val cumulative_log: Path = main_dir + Path.explode("log/main.log") // owned by log service val isabelle_repos: Path = main_dir + Path.explode("isabelle") val afp_repos: Path = main_dir + Path.explode("AFP") val mailman_archives_dir = Path.explode("~/cronjob/Mailman") val build_log_dirs = List(Path.explode("~/log"), Path.explode("~/afp/log"), Path.explode("~/cronjob/log")) /** logger tasks **/ sealed case class Logger_Task(name: String = "", body: Logger => Unit) /* init and exit */ def get_rev(): String = Mercurial.repository(isabelle_repos).id() def get_afp_rev(): String = Mercurial.repository(afp_repos).id() val init: Logger_Task = Logger_Task("init", logger => { Isabelle_Devel.make_index() Mercurial.setup_repository(Isabelle_System.isabelle_repository.root, isabelle_repos) Mercurial.setup_repository(Isabelle_System.afp_repository.root, afp_repos) File.write(logger.log_dir + Build_Log.log_filename("isabelle_identify", logger.start_date), Build_Log.Identify.content(logger.start_date, Some(get_rev()), Some(get_afp_rev()))) Isabelle_System.bash( """rsync -a --include="*/" --include="plain_identify*" --exclude="*" """ + Bash.string(backup + "/log/.") + " " + File.bash_path(main_dir) + "/log/.").check if (!Isabelle_Devel.cronjob_log.is_file) Files.createSymbolicLink(Isabelle_Devel.cronjob_log.java_path, current_log.java_path) }) val exit: Logger_Task = Logger_Task("exit", logger => { Isabelle_System.bash( "rsync -a " + File.bash_path(main_dir) + "/log/." 
+ " " + Bash.string(backup) + "/log/.") .check }) /* Mailman archives */ val mailman_archives: Logger_Task = Logger_Task("mailman_archives", logger => { Mailman.isabelle_users.download(mailman_archives_dir) Mailman.isabelle_dev.download(mailman_archives_dir) }) /* build release */ val build_release: Logger_Task = Logger_Task("build_release", logger => { Isabelle_Devel.release_snapshot(logger.options, get_rev(), get_afp_rev()) }) /* remote build_history */ sealed case class Item( known: Boolean, isabelle_version: String, afp_version: Option[String], pull_date: Date) { def unknown: Boolean = !known def versions: (String, Option[String]) = (isabelle_version, afp_version) def known_versions(rev: String, afp_rev: Option[String]): Boolean = known && rev != "" && isabelle_version == rev && (afp_rev.isEmpty || afp_rev.get != "" && afp_version == afp_rev) } def recent_items(db: SQL.Database, days: Int, rev: String, afp_rev: Option[String], sql: SQL.Source): List[Item] = { val afp = afp_rev.isDefined val select = Build_Log.Data.select_recent_versions( days = days, rev = rev, afp_rev = afp_rev, sql = "WHERE " + sql) db.using_statement(select)(stmt => stmt.execute_query().iterator(res => { val known = res.bool(Build_Log.Data.known) val isabelle_version = res.string(Build_Log.Prop.isabelle_version) val afp_version = if (afp) proper_string(res.string(Build_Log.Prop.afp_version)) else None val pull_date = res.date(Build_Log.Data.pull_date(afp)) Item(known, isabelle_version, afp_version, pull_date) }).toList) } def unknown_runs(items: List[Item]): List[List[Item]] = { val (run, rest) = Library.take_prefix[Item](_.unknown, items.dropWhile(_.known)) if (run.nonEmpty) run :: unknown_runs(rest) else Nil } sealed case class Remote_Build( description: String, host: String, actual_host: String = "", user: String = "", port: Int = 0, proxy_host: String = "", proxy_user: String = "", proxy_port: Int = 0, self_update: Boolean = false, historic: Boolean = false, history: Int = 0, history_base: String = "build_history_base", java_heap: String = "", options: String = "", args: String = "", afp: Boolean = false, bulky: Boolean = false, more_hosts: List[String] = Nil, detect: SQL.Source = "", active: Boolean = true) { def ssh_session(context: SSH.Context): SSH.Session = context.open_session(host = host, user = user, port = port, actual_host = actual_host, proxy_host = proxy_host, proxy_user = proxy_user, proxy_port = proxy_port, permissive = proxy_host.nonEmpty) def sql: SQL.Source = Build_Log.Prop.build_engine.toString + " = " + SQL.string(Build_History.engine) + " AND " + SQL.member(Build_Log.Prop.build_host.ident, host :: more_hosts) + (if (detect == "") "" else " AND " + SQL.enclose(detect)) def profile: Build_Status.Profile = Build_Status.Profile(description, history = history, afp = afp, bulky = bulky, sql = sql) def pick( options: Options, rev: String = "", filter: Item => Boolean = _ => true): Option[(String, Option[String])] = { val afp_rev = if (afp) Some(get_afp_rev()) else None val store = Build_Log.store(options) using(store.open_database())(db => { def pick_days(days: Int, gap: Int): Option[(String, Option[String])] = { val items = recent_items(db, days, rev, afp_rev, sql).filter(filter) def runs = unknown_runs(items).filter(run => run.length >= gap) if (historic || items.exists(_.known_versions(rev, afp_rev))) { val longest_run = runs.foldLeft(List.empty[Item]) { case (item1, item2) => if (item1.length >= item2.length) item1 else item2 } if (longest_run.isEmpty) None else 
Some(longest_run(longest_run.length / 2).versions) } else if (rev != "") Some((rev, afp_rev)) else runs.flatten.headOption.map(_.versions) } pick_days(options.int("build_log_history") max history, 2) orElse pick_days(200, 5) orElse pick_days(2000, 1) }) } def build_history_options: String = " -h " + Bash.string(host) + " " + (java_heap match { case "" => "" case h => "-e 'ISABELLE_TOOL_JAVA_OPTIONS=\"$ISABELLE_TOOL_JAVA_OPTIONS -Xmx" + h + "\"' " }) + options } val remote_builds_old: List[Remote_Build] = List( Remote_Build("Linux A", "i21of4", user = "i21isatest", proxy_host = "lxbroy10", proxy_user = "i21isatest", self_update = true, options = "-m32 -M1x4,2,4" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAML_SETUP=true" + " -e ISABELLE_GHC_SETUP=true" + " -e ISABELLE_MLTON=mlton" + " -e ISABELLE_SMLNJ=sml" + " -e ISABELLE_SWIPL=swipl", args = "-a -d '~~/src/Benchmarks'"), Remote_Build("Linux A", "lxbroy9", java_heap = "2g", options = "-m32 -B -M1x2,2", args = "-N -g timing"), Remote_Build("Linux Benchmarks", "lxbroy5", historic = true, history = 90, java_heap = "2g", options = "-m32 -B -M1x2,2 -t Benchmarks" + " -e ISABELLE_GHC=ghc -e ISABELLE_MLTON=mlton -e ISABELLE_OCAML=ocaml" + " -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAMLFIND=ocamlfind -e ISABELLE_SMLNJ=sml" + " -e ISABELLE_SWIPL=swipl", args = "-N -a -d '~~/src/Benchmarks'", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("Benchmarks")), Remote_Build("macOS 10.14 Mojave (Old)", "lapnipkow3", options = "-m32 -M1,2 -e ISABELLE_GHC_SETUP=true -p pide_session=false", self_update = true, args = "-a -d '~~/src/Benchmarks'"), Remote_Build("AFP old bulky", "lrzcloud1", self_update = true, proxy_host = "lxbroy10", proxy_user = "i21isatest", options = "-m64 -M6 -U30000 -s10 -t AFP", args = "-g large -g slow", afp = true, bulky = true, detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("AFP")), Remote_Build("AFP old", "lxbroy7", args = "-N -X large -X slow", afp = true, detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("AFP")), Remote_Build("Poly/ML 5.7 Linux", "lxbroy8", history_base = "37074e22e8be", options = "-m32 -B -M1x2,2 -t polyml-5.7 -i 'init_component /home/isabelle/contrib/polyml-5.7'", args = "-N -g timing", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("polyml-5.7") + " AND " + Build_Log.Settings.ML_OPTIONS + " <> " + SQL.string("-H 500")), Remote_Build("Poly/ML 5.7.1 Linux", "lxbroy8", history_base = "a9d5b59c3e12", options = "-m32 -B -M1x2,2 -t polyml-5.7.1-pre2 -i 'init_component /home/isabelle/contrib/polyml-test-905dae2ebfda'", args = "-N -g timing", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("polyml-5.7.1-pre1") + " OR " + Build_Log.Prop.build_tags + " = " + SQL.string("polyml-5.7.1-pre2")), Remote_Build("Poly/ML 5.7 macOS", "macbroy2", history_base = "37074e22e8be", options = "-m32 -B -M1x4,4 -t polyml-5.7 -i 'init_component /home/isabelle/contrib/polyml-5.7'", args = "-a", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("polyml-5.7")), Remote_Build("Poly/ML 5.7.1 macOS", "macbroy2", history_base = "a9d5b59c3e12", options = "-m32 -B -M1x4,4 -t polyml-5.7.1-pre2 -i 'init_component /home/isabelle/contrib/polyml-test-905dae2ebfda'", args = "-a", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("polyml-5.7.1-pre1") + " OR " + Build_Log.Prop.build_tags + " = " + SQL.string("polyml-5.7.1-pre2")), Remote_Build("macOS", "macbroy2", options = "-m32 -M8" + " -e ISABELLE_GHC=ghc -e 
ISABELLE_MLTON=mlton -e ISABELLE_OCAML=ocaml" + " -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAML_SETUP=true" + " -e ISABELLE_OPAM_ROOT=\"$ISABELLE_HOME/opam\"" + " -e ISABELLE_SMLNJ=/home/isabelle/smlnj/110.85/bin/sml" + " -p pide_session=false", args = "-a", detect = Build_Log.Prop.build_tags.undefined, history_base = "2c0f24e927dd"), Remote_Build("macOS, quick_and_dirty", "macbroy2", options = "-m32 -M8 -t quick_and_dirty -p pide_session=false", args = "-a -o quick_and_dirty", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("quick_and_dirty"), history_base = "2c0f24e927dd"), Remote_Build("macOS, skip_proofs", "macbroy2", options = "-m32 -M8 -t skip_proofs -p pide_session=false", args = "-a -o skip_proofs", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("skip_proofs"), history_base = "2c0f24e927dd"), Remote_Build("Poly/ML test", "lxbroy8", options = "-m32 -B -M1x2,2 -t polyml-test -i 'init_component /home/isabelle/contrib/polyml-5.7-20170217'", args = "-N -g timing", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("polyml-test")), Remote_Build("macOS 10.12 Sierra", "macbroy30", options = "-m32 -M2 -p pide_session=false", args = "-a", detect = Build_Log.Prop.build_start.toString + " > date '2017-03-03'"), Remote_Build("macOS 10.10 Yosemite", "macbroy31", options = "-m32 -M2 -p pide_session=false", args = "-a"), Remote_Build("macOS 10.8 Mountain Lion", "macbroy30", options = "-m32 -M2", args = "-a", detect = Build_Log.Prop.build_start.toString + " < date '2017-03-03'")) ::: { for { (n, hosts) <- List(1 -> List("lxbroy6"), 2 -> List("lxbroy8", "lxbroy5")) } yield { Remote_Build("AFP old", host = hosts.head, more_hosts = hosts.tail, options = "-m32 -M1x2 -t AFP -P" + n + " -e ISABELLE_GHC=ghc" + " -e ISABELLE_MLTON=mlton" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAMLFIND=ocamlfind" + " -e ISABELLE_SMLNJ=sml", args = "-N -X large -X slow", afp = true, detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("AFP")) } } val remote_builds1: List[List[Remote_Build]] = { List( List(Remote_Build("Linux A", "augsburg1", - options = "-m32 -B -M1x2,2,4" + + options = "-m32 -B -M4" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAMLFIND=ocamlfind" + " -e ISABELLE_GHC_SETUP=true" + " -e ISABELLE_MLTON=mlton" + " -e ISABELLE_SMLNJ=sml" + " -e ISABELLE_SWIPL=swipl", self_update = true, args = "-a -d '~~/src/Benchmarks'")), List(Remote_Build("Linux B", "lxbroy10", historic = true, history = 90, options = "-m32 -B -M1x4,2,4,6", args = "-N -g timing")), List(Remote_Build("macOS 10.13 High Sierra", "lapbroy68", options = "-m32 -B -M1,2,4 -e ISABELLE_GHC_SETUP=true -p pide_session=false", self_update = true, args = "-a -d '~~/src/Benchmarks'")), List( Remote_Build("macOS 11 Big Sur", "mini1", options = "-m32 -B -M1x2,2,4 -p pide_session=false" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAML_SETUP=true" + " -e ISABELLE_GHC_SETUP=true" + " -e ISABELLE_MLTON=/usr/local/bin/mlton" + " -e ISABELLE_SMLNJ=/usr/local/smlnj/bin/sml" + " -e ISABELLE_SWIPL=/usr/local/bin/swipl", self_update = true, args = "-a -d '~~/src/Benchmarks'")), List( Remote_Build("macOS 10.14 Mojave", "mini2", options = "-m32 -B -M1x2,2,4 -p pide_session=false" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAML_SETUP=true" + " -e ISABELLE_GHC_SETUP=true" + " -e ISABELLE_MLTON=/usr/local/bin/mlton" + " -e ISABELLE_SMLNJ=/usr/local/smlnj/bin/sml" + " -e ISABELLE_SWIPL=/usr/local/bin/swipl", self_update 
= true, args = "-a -d '~~/src/Benchmarks'"), Remote_Build("macOS, quick_and_dirty", "mini2", options = "-m32 -M4 -t quick_and_dirty -p pide_session=false", self_update = true, args = "-a -o quick_and_dirty", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("quick_and_dirty")), Remote_Build("macOS, skip_proofs", "mini2", options = "-m32 -M4 -t skip_proofs -p pide_session=false", args = "-a -o skip_proofs", detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("skip_proofs"))), List(Remote_Build("macOS 10.15 Catalina", "laramac01", user = "makarius", proxy_host = "laraserver", proxy_user = "makarius", self_update = true, options = "-m32 -M4 -e ISABELLE_GHC_SETUP=true -p pide_session=false", args = "-a -d '~~/src/Benchmarks'")), List( Remote_Build("Windows", "vmnipkow9", historic = true, history = 90, self_update = true, options = "-m32 -M4" + " -C /cygdrive/d/isatest/contrib" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAML_SETUP=true" + " -e ISABELLE_GHC_SETUP=true" + " -e ISABELLE_SMLNJ=/usr/local/smlnj-110.81/bin/sml", args = "-a", detect = Build_Log.Settings.ML_PLATFORM.toString + " = " + SQL.string("x86-windows") + " OR " + Build_Log.Settings.ML_PLATFORM + " = " + SQL.string("x86_64_32-windows")), Remote_Build("Windows", "vmnipkow9", historic = true, history = 90, self_update = true, options = "-m64 -M4" + " -C /cygdrive/d/isatest/contrib" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAML_SETUP=true" + " -e ISABELLE_GHC_SETUP=true" + " -e ISABELLE_SMLNJ=/usr/local/smlnj-110.81/bin/sml", args = "-a", detect = Build_Log.Settings.ML_PLATFORM.toString + " = " + SQL.string("x86_64-windows")))) } val remote_builds2: List[List[Remote_Build]] = List( List( Remote_Build("AFP", "lrzcloud2", actual_host = "10.195.4.41", self_update = true, proxy_host = "lxbroy10", proxy_user = "i21isatest", java_heap = "8g", options = "-m32 -M1x6 -t AFP" + " -e ISABELLE_GHC=ghc" + " -e ISABELLE_MLTON=mlton" + " -e ISABELLE_OCAML=ocaml -e ISABELLE_OCAMLC=ocamlc -e ISABELLE_OCAMLFIND=ocamlfind" + " -e ISABELLE_SMLNJ=sml", args = "-a -X large -X slow", afp = true, detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("AFP")), Remote_Build("AFP", "lrzcloud2", actual_host = "10.195.4.41", self_update = true, proxy_host = "lxbroy10", proxy_user = "i21isatest", java_heap = "8g", options = "-m64 -M8 -U30000 -s10 -t AFP", args = "-g large -g slow", afp = true, bulky = true, detect = Build_Log.Prop.build_tags.toString + " = " + SQL.string("AFP")))) def remote_build_history(rev: String, afp_rev: Option[String], i: Int, r: Remote_Build) : Logger_Task = { val task_name = "build_history-" + r.host Logger_Task(task_name, logger => { using(r.ssh_session(logger.ssh_context))(ssh => { val results = Build_History.remote_build_history(ssh, isabelle_repos, isabelle_repos.ext(r.host), isabelle_identifier = "cronjob_build_history", self_update = r.self_update, rev = rev, afp_rev = afp_rev, options = " -N " + Bash.string(task_name) + (if (i < 0) "" else "_" + (i + 1).toString) + " -R " + Bash.string(Components.default_component_repository) + " -C '$USER_HOME/.isabelle/contrib' -f " + r.build_history_options, args = "-o timeout=10800 " + r.args) for ((log_name, bytes) <- results) { logger.log(Date.now(), log_name) Bytes.write(logger.log_dir + Path.explode(log_name), bytes) } }) }) } val build_status_profiles: List[Build_Status.Profile] = (remote_builds_old :: remote_builds1 ::: remote_builds2).flatten.map(_.profile) /** task logging **/ object Log_Service { 
def apply(options: Options, progress: Progress = new Progress): Log_Service = new Log_Service(SSH.init_context(options), progress) } class Log_Service private(val ssh_context: SSH.Context, progress: Progress) { current_log.file.delete private val thread: Consumer_Thread[String] = Consumer_Thread.fork("cronjob: logger", daemon = true)( consume = (text: String) => { // critical File.append(current_log, text + "\n") File.append(cumulative_log, text + "\n") progress.echo(text) true }) def shutdown(): Unit = { thread.shutdown() } val hostname: String = Isabelle_System.hostname() def log(date: Date, task_name: String, msg: String): Unit = if (task_name != "") thread.send( "[" + Build_Log.print_date(date) + ", " + hostname + ", " + task_name + "]: " + msg) def start_logger(start_date: Date, task_name: String): Logger = new Logger(this, start_date, task_name) def run_task(start_date: Date, task: Logger_Task): Unit = { val logger = start_logger(start_date, task.name) val res = Exn.capture { task.body(logger) } val end_date = Date.now() val err = res match { case Exn.Res(_) => None case Exn.Exn(exn) => Output.writeln("Exception trace for " + quote(task.name) + ":") exn.printStackTrace() val first_line = split_lines(Exn.message(exn)).headOption getOrElse "exception" Some(first_line) } logger.log_end(end_date, err) } def fork_task(start_date: Date, task: Logger_Task): Task = new Task(task.name, run_task(start_date, task)) } class Logger private[Isabelle_Cronjob]( val log_service: Log_Service, val start_date: Date, val task_name: String) { def ssh_context: SSH.Context = log_service.ssh_context def options: Options = ssh_context.options def log(date: Date, msg: String): Unit = log_service.log(date, task_name, msg) def log_end(end_date: Date, err: Option[String]): Unit = { val elapsed_time = end_date.time - start_date.time val msg = (if (err.isEmpty) "finished" else "ERROR " + err.get) + (if (elapsed_time.seconds < 3.0) "" else " (" + elapsed_time.message_hms + " elapsed time)") log(end_date, msg) } val log_dir = Isabelle_System.make_directory(main_dir + Build_Log.log_subdir(start_date)) log(start_date, "started") } class Task private[Isabelle_Cronjob](name: String, body: => Unit) { private val future: Future[Unit] = Future.thread("cronjob: " + name) { body } def is_finished: Boolean = future.is_finished } /** cronjob **/ def cronjob(progress: Progress, exclude_task: Set[String]): Unit = { /* soft lock */ val still_running = try { Some(File.read(main_state_file)) } catch { case ERROR(_) => None } still_running match { case None | Some("") => case Some(running) => error("Isabelle cronjob appears to be still running: " + running) } /* log service */ val log_service = Log_Service(Options.init(), progress = progress) def run(start_date: Date, task: Logger_Task): Unit = log_service.run_task(start_date, task) def run_now(task: Logger_Task): Unit = run(Date.now(), task) /* structured tasks */ def SEQ(tasks: List[Logger_Task]): Logger_Task = Logger_Task(body = _ => for (task <- tasks.iterator if !exclude_task(task.name) || task.name == "") run_now(task)) def PAR(tasks: List[Logger_Task]): Logger_Task = Logger_Task(body = _ => { @tailrec def join(running: List[Task]): Unit = { running.partition(_.is_finished) match { case (Nil, Nil) => case (Nil, _ :: _) => Time.seconds(0.5).sleep(); join(running) case (_ :: _, remaining) => join(remaining) } } val start_date = Date.now() val running = for (task <- tasks if !exclude_task(task.name)) yield log_service.fork_task(start_date, task) join(running) }) /* repository 
structure */ val hg = Mercurial.repository(isabelle_repos) val hg_graph = hg.graph() def history_base_filter(r: Remote_Build): Item => Boolean = { val base_rev = hg.id(r.history_base) val nodes = hg_graph.all_succs(List(base_rev)).toSet (item: Item) => nodes(item.isabelle_version) } /* main */ val main_start_date = Date.now() File.write(main_state_file, main_start_date.toString + " " + log_service.hostname) run(main_start_date, Logger_Task("isabelle_cronjob", logger => run_now( SEQ(List( init, PAR(List(mailman_archives, build_release)), PAR( List(remote_builds1, remote_builds2).map(remote_builds => SEQ(List( PAR(remote_builds.map(_.filter(_.active)).map(seq => SEQ( for { (r, i) <- (if (seq.length <= 1) seq.map((_, -1)) else seq.zipWithIndex) (rev, afp_rev) <- r.pick(logger.options, hg.id(), history_base_filter(r)) } yield remote_build_history(rev, afp_rev, i, r)))), Logger_Task("jenkins_logs", _ => Jenkins.download_logs(logger.options, Jenkins.build_log_jobs, main_dir)), Logger_Task("build_log_database", logger => Isabelle_Devel.build_log_database(logger.options, build_log_dirs)), Logger_Task("build_status", logger => Isabelle_Devel.build_status(logger.options)))))), exit))))) log_service.shutdown() main_state_file.file.delete } /** command line entry point **/ def main(args: Array[String]): Unit = { Command_Line.tool { var force = false var verbose = false var exclude_task = Set.empty[String] val getopts = Getopts(""" Usage: Admin/cronjob/main [OPTIONS] Options are: -f apply force to do anything -v verbose -x NAME exclude tasks with this name """, "f" -> (_ => force = true), "v" -> (_ => verbose = true), "x:" -> (arg => exclude_task += arg)) val more_args = getopts(args) if (more_args.nonEmpty) getopts.usage() val progress = if (verbose) new Console_Progress() else new Progress if (force) cronjob(progress, exclude_task) else error("Need to apply force to do anything") } } } diff --git a/src/Pure/General/mailman.scala b/src/Pure/General/mailman.scala --- a/src/Pure/General/mailman.scala +++ b/src/Pure/General/mailman.scala @@ -1,71 +1,101 @@ /* Title: Pure/General/mailman.scala Author: Makarius Support for Mailman list servers. 
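Looking back at the cronjob body above: SEQ and PAR assemble the whole schedule as a value, SEQ running tasks in order and PAR forking each task and polling until all are finished, much like the tailrec join above. A stripped-down model of the idea, with a hypothetical Task wrapper and no logging:

import scala.annotation.tailrec

object TaskComboSketch {
  final class Task(body: => Unit) {
    private val thread = new Thread(() => body)
    thread.start()
    def isFinished: Boolean = !thread.isAlive
  }

  def SEQ(tasks: List[() => Unit]): Unit = tasks.foreach(_.apply())

  def PAR(tasks: List[() => Unit]): Unit = {
    val running = tasks.map(t => new Task(t()))
    @tailrec def join(ts: List[Task]): Unit =
      ts.filterNot(_.isFinished) match {
        case Nil =>
        case rest => Thread.sleep(500); join(rest)
      }
    join(running)
  }

  def main(args: Array[String]): Unit =
    SEQ(List(
      () => println("init"),
      () => PAR(List(() => println("build A"), () => println("build B"))),
      () => println("exit")))
}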
*/ package isabelle import java.net.URL +import scala.util.matching.Regex + object Mailman { /* mailing list archives */ - def archive(url: URL, name: String = ""): Archive = + def archive(url: URL, msg_format: Msg_Format, name: String = ""): Archive = { - val text = Url.read(url) - val hrefs = """href="([^"]+\.txt(?:\.gz)?)"""".r.findAllMatchIn(text).map(_.group(1)).toList - val title = - """The ([^</>]*) Archives""".r.findFirstMatchIn(text).map(_.group(1)) val list_url = Url(Library.take_suffix[Char](_ == '/', Url.trim_index(url).toString.toList)._1.mkString + "/") + + val html = Url.read(list_url) + val title = + """The ([^</>]*) Archives""".r.findFirstMatchIn(html).map(_.group(1)) + val hrefs_text = + """href="([^"]+\.txt(?:\.gz)?)"""".r.findAllMatchIn(html).map(_.group(1)).toList + val list_name = (proper_string(name) orElse title).getOrElse(error("Failed to determine mailing list name")) - new Archive(list_url, list_name, hrefs) + new Archive(list_url, list_name, msg_format, hrefs_text) } - class Archive private[Mailman](val list_url: URL, val list_name: String, hrefs: List[String]) + abstract class Msg_Format + { + def regex: Regex + } + + class Archive private[Mailman]( + val list_url: URL, + val list_name: String, + msg_format: Msg_Format, + hrefs_text: List[String]) { override def toString: String = list_name - def download(target_dir: Path, progress: Progress = new Progress): List[Path] = + private def hrefs_msg: List[String] = + (for { + href <- """href="([^"]+)/date.html"""".r.findAllMatchIn(Url.read(list_url)).map(_.group(1)) + html = Url.read(new URL(list_url, href + "/date.html")) + msg <- msg_format.regex.findAllMatchIn(html).map(_.group(1)) + } yield href + "/" + msg).toList + + def get(target_dir: Path, href: String, progress: Progress = new Progress): Option[Path] = { val dir = target_dir + Path.basic(list_name) - Isabelle_System.make_directory(dir) + val path = dir + Path.explode(href) + val url = new URL(list_url, href) + val connection = url.openConnection + try { + val length = connection.getContentLengthLong + val timestamp = connection.getLastModified + val file = path.file + if (file.isFile && file.length == length && file.lastModified == timestamp) None + else { + Isabelle_System.make_directory(path.dir) + progress.echo("Getting " + url) + val bytes = + using(connection.getInputStream)(Bytes.read_stream(_, hint = length.toInt max 1024)) + Bytes.write(file, bytes) + file.setLastModified(timestamp) + Some(path) + } + } + finally { connection.getInputStream.close() } + } - hrefs.flatMap(name => - { - val path = dir + Path.basic(name) - val url = new URL(list_url, name) - val connection = url.openConnection - try { - val length = connection.getContentLengthLong - val timestamp = connection.getLastModified - val file = path.file - if (file.isFile && file.length == length && file.lastModified == timestamp) None - else { - progress.echo("Getting " + url) - val bytes = - using(connection.getInputStream)(Bytes.read_stream(_, hint = length.toInt max 1024)) - Bytes.write(file, bytes) - file.setLastModified(timestamp) - Some(path) - } - } - finally { connection.getInputStream.close() } - }) - } + def download_text(target_dir: Path, progress: Progress = new Progress): List[Path] = + hrefs_text.flatMap(get(target_dir, _, progress = progress)) + + def download_msg(target_dir: Path, progress: Progress = new Progress): List[Path] = + hrefs_msg.flatMap(get(target_dir, _, progress = progress)) + + def download(target_dir: Path, progress: Progress = new Progress): List[Path] = + 
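The new get method above is an incremental fetch: the download is skipped whenever the local file already matches the server's Content-Length and Last-Modified headers, which keeps repeated mirroring of a large archive cheap. Its essence as a standalone sketch, with a hypothetical URL and target file:

import java.io.File
import java.net.URL
import java.nio.file.{Files, StandardCopyOption}

object FetchSketch {
  // download url into file unless size and timestamp already match the server's headers
  def fetchIfChanged(url: URL, file: File): Boolean = {
    val connection = url.openConnection()
    val length = connection.getContentLengthLong
    val timestamp = connection.getLastModified
    if (file.isFile && file.length == length && file.lastModified == timestamp) false
    else {
      val in = connection.getInputStream
      try Files.copy(in, file.toPath, StandardCopyOption.REPLACE_EXISTING)
      finally in.close()
      file.setLastModified(timestamp)
      true
    }
  }

  def main(args: Array[String]): Unit = {
    val changed = fetchIfChanged(new URL("https://isabelle.in.tum.de/"), new File("index.html"))
    println(if (changed) "downloaded" else "up to date")
  }
}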
download_text(target_dir, progress = progress) ::: + download_msg(target_dir, progress = progress) } /* Isabelle mailing lists */ def isabelle_users: Archive = - archive(Url("https://lists.cam.ac.uk/pipermail/cl-isabelle-users"), name = "isabelle-users") + archive(Url("https://lists.cam.ac.uk/pipermail/cl-isabelle-users"), name = "isabelle-users", + msg_format = + new Msg_Format { val regex: Regex = """
  • """.r }) } diff --git a/src/Pure/General/scan.scala b/src/Pure/General/scan.scala --- a/src/Pure/General/scan.scala +++ b/src/Pure/General/scan.scala @@ -1,519 +1,483 @@ /* Title: Pure/General/scan.scala Author: Makarius Efficient scanning of keywords and tokens. */ package isabelle import scala.annotation.tailrec import scala.collection.IndexedSeq import scala.util.matching.Regex import scala.util.parsing.input.{OffsetPosition, Position => InputPosition, Reader, CharSequenceReader, PagedSeq} import scala.util.parsing.combinator.RegexParsers import java.io.{File => JFile, BufferedInputStream, FileInputStream, InputStream} import java.net.URL object Scan { /** context of partial line-oriented scans **/ abstract class Line_Context case object Finished extends Line_Context case class Quoted(quote: String) extends Line_Context - case object Verbatim extends Line_Context case class Cartouche(depth: Int) extends Line_Context case class Comment_Prefix(symbol: Symbol.Symbol) extends Line_Context case class Cartouche_Comment(depth: Int) extends Line_Context case class Comment(depth: Int) extends Line_Context /** parser combinators **/ object Parsers extends Parsers trait Parsers extends RegexParsers { override val whiteSpace: Regex = "".r /* optional termination */ def opt_term[T](p: => Parser[T]): Parser[Option[T]] = p ^^ (x => Some(x)) | """\z""".r ^^ (_ => None) /* repeated symbols */ def repeated(pred: Symbol.Symbol => Boolean, min_count: Int, max_count: Int): Parser[String] = new Parser[String] { def apply(in: Input) = { val start = in.offset val end = in.source.length val matcher = new Symbol.Matcher(in.source) var i = start var count = 0 var finished = false while (!finished && i < end && count < max_count) { val n = matcher(i, end) val sym = in.source.subSequence(i, i + n).toString if (pred(sym)) { i += n; count += 1 } else finished = true } if (count < min_count) Failure("bad input", in) else Success(in.source.subSequence(start, i).toString, in.drop(i - start)) } }.named("repeated") def one(pred: Symbol.Symbol => Boolean): Parser[String] = repeated(pred, 1, 1) def maybe(pred: Symbol.Symbol => Boolean): Parser[String] = repeated(pred, 0, 1) def many(pred: Symbol.Symbol => Boolean): Parser[String] = repeated(pred, 0, Integer.MAX_VALUE) def many1(pred: Symbol.Symbol => Boolean): Parser[String] = repeated(pred, 1, Integer.MAX_VALUE) /* character */ def character(pred: Char => Boolean): Symbol.Symbol => Boolean = (s: Symbol. Symbol) => s.length == 1 && pred(s.charAt(0)) /* quoted strings */ private def quoted_body(quote: Symbol.Symbol): Parser[String] = { rep(many1(sym => sym != quote && sym != "\\") | "\\" + quote | "\\\\" | ("""\\\d\d\d""".r ^? 
{ case x if x.substring(1, 4).toInt <= 255 => x })) ^^ (_.mkString) } def quoted(quote: Symbol.Symbol): Parser[String] = { quote ~ quoted_body(quote) ~ quote ^^ { case x ~ y ~ z => x + y + z } }.named("quoted") def quoted_content(quote: Symbol.Symbol, source: String): String = { require(parseAll(quoted(quote), source).successful, "no quoted text") val body = source.substring(1, source.length - 1) if (body.exists(_ == '\\')) { val content = rep(many1(sym => sym != quote && sym != "\\") | "\\" ~> (quote | "\\" | """\d\d\d""".r ^^ { case x => x.toInt.toChar.toString })) parseAll(content ^^ (_.mkString), body).get } else body } def quoted_line(quote: Symbol.Symbol, ctxt: Line_Context): Parser[(String, Line_Context)] = { ctxt match { case Finished => quote ~ quoted_body(quote) ~ opt_term(quote) ^^ { case x ~ y ~ Some(z) => (x + y + z, Finished) case x ~ y ~ None => (x + y, Quoted(quote)) } case Quoted(q) if q == quote => quoted_body(quote) ~ opt_term(quote) ^^ { case x ~ Some(y) => (x + y, Finished) case x ~ None => (x, ctxt) } case _ => failure("") } }.named("quoted_line") def recover_quoted(quote: Symbol.Symbol): Parser[String] = quote ~ quoted_body(quote) ^^ { case x ~ y => x + y } - /* verbatim text */ - - private def verbatim_body: Parser[String] = - rep(many1(sym => sym != "*") | """\*(?!\})""".r) ^^ (_.mkString) - - def verbatim: Parser[String] = - { - "{*" ~ verbatim_body ~ "*}" ^^ { case x ~ y ~ z => x + y + z } - }.named("verbatim") - - def verbatim_content(source: String): String = - { - require(parseAll(verbatim, source).successful, "no verbatim text") - source.substring(2, source.length - 2) - } - - def verbatim_line(ctxt: Line_Context): Parser[(String, Line_Context)] = - { - ctxt match { - case Finished => - "{*" ~ verbatim_body ~ opt_term("*}") ^^ - { case x ~ y ~ Some(z) => (x + y + z, Finished) - case x ~ y ~ None => (x + y, Verbatim) } - case Verbatim => - verbatim_body ~ opt_term("*}") ^^ - { case x ~ Some(y) => (x + y, Finished) - case x ~ None => (x, Verbatim) } - case _ => failure("") - } - }.named("verbatim_line") - - val recover_verbatim: Parser[String] = - "{*" ~ verbatim_body ^^ { case x ~ y => x + y } - - /* nested text cartouches */ def cartouche_depth(depth: Int): Parser[(String, Int)] = new Parser[(String, Int)] { require(depth >= 0, "bad cartouche depth") def apply(in: Input) = { val start = in.offset val end = in.source.length val matcher = new Symbol.Matcher(in.source) var i = start var d = depth var finished = false while (!finished && i < end) { val n = matcher(i, end) val sym = in.source.subSequence(i, i + n).toString if (Symbol.is_open(sym)) { i += n; d += 1 } else if (Symbol.is_close(sym) && d > 0) { i += n; d -= 1; if (d == 0) finished = true } else if (d > 0) i += n else finished = true } if (i == start) Failure("bad input", in) else Success((in.source.subSequence(start, i).toString, d), in.drop(i - start)) } }.named("cartouche_depth") def cartouche: Parser[String] = cartouche_depth(0) ^? 
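cartouche_depth above needs only a single integer while walking symbols to handle arbitrarily nested text cartouches: it succeeds with the consumed prefix plus the resulting depth. The counting idea in isolation, over plain characters rather than Isabelle symbols:

object CartoucheDepthSketch {
  // consume the longest prefix that stays inside a cartouche; return the text and final depth
  def scan(s: String, depth: Int = 0): (String, Int) = {
    var i = 0
    var d = depth
    var finished = false
    while (!finished && i < s.length) {
      s.charAt(i) match {
        case '‹' => i += 1; d += 1
        case '›' if d > 0 => i += 1; d -= 1; if (d == 0) finished = true
        case _ if d > 0 => i += 1
        case _ => finished = true
      }
    }
    (s.substring(0, i), d)
  }

  def main(args: Array[String]): Unit = {
    assert(scan("‹a‹b›c›rest") == (("‹a‹b›c›", 0)))
    assert(scan("‹open only") == (("‹open only", 1)))
  }
}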
{ case (x, d) if d == 0 => x } def cartouche_line(ctxt: Line_Context): Parser[(String, Line_Context)] = { def cartouche_context(d: Int): Line_Context = if (d == 0) Finished else Cartouche(d) ctxt match { case Finished => cartouche_depth(0) ^^ { case (c, d) => (c, cartouche_context(d)) } case Cartouche(depth) => cartouche_depth(depth) ^^ { case (c, d) => (c, cartouche_context(d)) } case _ => failure("") } } val recover_cartouche: Parser[String] = cartouche_depth(0) ^^ (_._1) def cartouche_content(source: String): String = { def err(): Nothing = error("Malformed text cartouche: " + quote(source)) val source1 = Library.try_unprefix(Symbol.open_decoded, source) orElse Library.try_unprefix(Symbol.open, source) getOrElse err() Library.try_unsuffix(Symbol.close_decoded, source1) orElse Library.try_unsuffix(Symbol.close, source1) getOrElse err() } /* nested comments */ private def comment_depth(depth: Int): Parser[(String, Int)] = new Parser[(String, Int)] { require(depth >= 0, "bad comment depth") val comment_text: Parser[List[String]] = rep1(many1(sym => sym != "*" && sym != "(") | """\*(?!\))|\((?!\*)""".r) def apply(in: Input) = { var rest = in def try_parse[A](p: Parser[A]): Boolean = { parse(p ^^^ (()), rest) match { case Success(_, next) => { rest = next; true } case _ => false } } var d = depth var finished = false while (!finished) { if (try_parse("(*")) d += 1 else if (d > 0 && try_parse("*)")) { d -= 1; if (d == 0) finished = true } else if (d == 0 || !try_parse(comment_text)) finished = true } if (in.offset < rest.offset) Success((in.source.subSequence(in.offset, rest.offset).toString, d), rest) else Failure("comment expected", in) } }.named("comment_depth") def comment: Parser[String] = comment_depth(0) ^? { case (x, d) if d == 0 => x } def comment_line(ctxt: Line_Context): Parser[(String, Line_Context)] = { val depth = ctxt match { case Finished => 0 case Comment(d) => d case _ => -1 } if (depth >= 0) comment_depth(depth) ^^ { case (x, 0) => (x, Finished) case (x, d) => (x, Comment(d)) } else failure("") } val recover_comment: Parser[String] = comment_depth(0) ^^ (_._1) def comment_content(source: String): String = { require(parseAll(comment, source).successful, "no comment") source.substring(2, source.length - 2) } /* control cartouches */ val control_symbol: Parser[String] = one(Symbol.is_control) val control_cartouche: Parser[String] = control_symbol ~ cartouche ^^ { case a ~ b => a + b } /* keyword */ def literal(lexicon: Lexicon): Parser[String] = new Parser[String] { def apply(in: Input) = { val result = lexicon.scan(in) if (result.isEmpty) Failure("keyword expected", in) else Success(result, in.drop(result.length)) } }.named("keyword") } /** Lexicon -- position tree **/ object Lexicon { /* representation */ private sealed case class Tree(branches: Map[Char, (String, Tree)]) private val empty_tree = Tree(Map()) val empty: Lexicon = new Lexicon(empty_tree) def apply(elems: String*): Lexicon = empty ++ elems } final class Lexicon private(rep: Lexicon.Tree) { /* auxiliary operations */ private def dest(tree: Lexicon.Tree, result: List[String]): List[String] = tree.branches.toList.foldLeft(result) { case (res, (_, (s, tr))) => if (s.isEmpty) dest(tr, res) else dest(tr, s :: res) } private def lookup(str: CharSequence): Option[(Boolean, Lexicon.Tree)] = { val len = str.length @tailrec def look(tree: Lexicon.Tree, tip: Boolean, i: Int): Option[(Boolean, Lexicon.Tree)] = { if (i < len) { tree.branches.get(str.charAt(i)) match { case Some((s, tr)) => look(tr, s.nonEmpty, i + 1) case 
None => None } } else Some(tip, tree) } look(rep, false, 0) } def completions(str: CharSequence): List[String] = lookup(str) match { case Some((true, tree)) => dest(tree, List(str.toString)) case Some((false, tree)) => dest(tree, Nil) case None => Nil } /* pseudo Set methods */ def raw_iterator: Iterator[String] = dest(rep, Nil).iterator def iterator: Iterator[String] = dest(rep, Nil).sorted.iterator override def toString: String = iterator.mkString("Lexicon(", ", ", ")") def is_empty: Boolean = rep.branches.isEmpty def contains(elem: String): Boolean = lookup(elem) match { case Some((tip, _)) => tip case _ => false } /* build lexicon */ def + (elem: String): Lexicon = if (contains(elem)) this else { val len = elem.length def extend(tree: Lexicon.Tree, i: Int): Lexicon.Tree = if (i < len) { val c = elem.charAt(i) val end = (i + 1 == len) tree.branches.get(c) match { case Some((s, tr)) => Lexicon.Tree(tree.branches + (c -> (if (end) elem else s, extend(tr, i + 1)))) case None => Lexicon.Tree(tree.branches + (c -> (if (end) elem else "", extend(Lexicon.empty_tree, i + 1)))) } } else tree new Lexicon(extend(rep, 0)) } def ++ (elems: IterableOnce[String]): Lexicon = elems.iterator.foldLeft(this)(_ + _) def ++ (other: Lexicon): Lexicon = if (this eq other) this else if (is_empty) other else this ++ other.raw_iterator def -- (remove: Iterable[String]): Lexicon = if (remove.exists(contains)) Lexicon.empty ++ iterator.filterNot(a => remove.exists(b => a == b)) else this /* scan */ def scan(in: Reader[Char]): String = { val source = in.source val offset = in.offset val len = source.length - offset @tailrec def scan_tree(tree: Lexicon.Tree, result: String, i: Int): String = { if (i < len) { tree.branches.get(source.charAt(offset + i)) match { case Some((s, tr)) => scan_tree(tr, if (s.isEmpty) result else s, i + 1) case None => result } } else result } scan_tree(rep, "", 0) } } /** read stream without decoding: efficient length operation **/ private class Restricted_Seq(seq: IndexedSeq[Char], start: Int, end: Int) extends CharSequence { def charAt(i: Int): Char = if (0 <= i && i < length) seq(start + i) else throw new IndexOutOfBoundsException def length: Int = end - start // avoid expensive seq.length def subSequence(i: Int, j: Int): CharSequence = if (0 <= i && i <= j && j <= length) new Restricted_Seq(seq, start + i, start + j) else throw new IndexOutOfBoundsException override def toString: String = { val buf = new StringBuilder(length) for (offset <- start until end) buf.append(seq(offset)) buf.toString } } abstract class Byte_Reader extends Reader[Char] with AutoCloseable private def make_byte_reader(stream: InputStream, stream_length: Int): Byte_Reader = { val buffered_stream = new BufferedInputStream(stream) val seq = new PagedSeq( (buf: Array[Char], offset: Int, length: Int) => { var i = 0 var c = 0 var eof = false while (!eof && i < length) { c = buffered_stream.read if (c == -1) eof = true else { buf(offset + i) = c.toChar; i += 1 } } if (i > 0) i else -1 }) val restricted_seq = new Restricted_Seq(seq, 0, stream_length) class Paged_Reader(override val offset: Int) extends Byte_Reader { override lazy val source: CharSequence = restricted_seq def first: Char = if (seq.isDefinedAt(offset)) seq(offset) else '\u001a' def rest: Paged_Reader = if (seq.isDefinedAt(offset)) new Paged_Reader(offset + 1) else this def pos: InputPosition = new OffsetPosition(source, offset) def atEnd: Boolean = !seq.isDefinedAt(offset) override def drop(n: Int): Paged_Reader = new Paged_Reader(offset + n) def close(): 
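The Lexicon above is a per-character trie whose nodes carry the longest keyword ending at that node, so scan finds the longest known keyword in a single left-to-right pass without backtracking. A compact immutable version of the same structure:

object TrieSketch {
  final case class Tree(branches: Map[Char, (String, Tree)] = Map()) {
    def +(word: String): Tree = {
      def extend(t: Tree, i: Int): Tree =
        if (i == word.length) t
        else {
          val c = word.charAt(i)
          val (s, sub) = t.branches.getOrElse(c, ("", Tree()))
          val s1 = if (i + 1 == word.length) word else s
          Tree(t.branches + (c -> (s1, extend(sub, i + 1))))
        }
      extend(this, 0)
    }
    // longest keyword that is a prefix of the input, or ""
    def scan(input: String): String = {
      @annotation.tailrec
      def go(t: Tree, best: String, i: Int): String =
        if (i == input.length) best
        else t.branches.get(input.charAt(i)) match {
          case Some((s, sub)) => go(sub, if (s.isEmpty) best else s, i + 1)
          case None => best
        }
      go(this, "", 0)
    }
  }

  def main(args: Array[String]): Unit = {
    val lexicon = Tree() + "<" + "<=" + "<->"
    assert(lexicon.scan("<=>") == "<=")
    assert(lexicon.scan("<-") == "<")
  }
}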
Unit = buffered_stream.close() } new Paged_Reader(0) } def byte_reader(file: JFile): Byte_Reader = make_byte_reader(new FileInputStream(file), file.length.toInt) def byte_reader(url: URL): Byte_Reader = { val connection = url.openConnection val stream = connection.getInputStream val stream_length = connection.getContentLength make_byte_reader(stream, stream_length) } def reader_is_utf8(reader: Reader[Char]): Boolean = reader.isInstanceOf[Byte_Reader] def reader_decode_utf8(is_utf8: Boolean, s: String): String = if (is_utf8) UTF8.decode_permissive(s) else s def reader_decode_utf8(reader: Reader[Char], s: String): String = reader_decode_utf8(reader_is_utf8(reader), s) /* plain text reader */ def char_reader(text: CharSequence): CharSequenceReader = new CharSequenceReader(text) } diff --git a/src/Pure/Isar/args.ML b/src/Pure/Isar/args.ML --- a/src/Pure/Isar/args.ML +++ b/src/Pure/Isar/args.ML @@ -1,199 +1,189 @@ (* Title: Pure/Isar/args.ML Author: Markus Wenzel, TU Muenchen Quasi-inner syntax based on outer tokens: concrete argument syntax of attributes, methods etc. *) signature ARGS = sig val context: Proof.context context_parser val theory: theory context_parser val symbolic: Token.T parser val $$$ : string -> string parser val add: string parser val del: string parser val colon: string parser val query: string parser val bang: string parser val query_colon: string parser val bang_colon: string parser val parens: 'a parser -> 'a parser val bracks: 'a parser -> 'a parser val mode: string -> bool parser val maybe: 'a parser -> 'a option parser val name_token: Token.T parser val name: string parser val name_position: (string * Position.T) parser val cartouche_inner_syntax: string parser val cartouche_input: Input.source parser - val text_token: Token.T parser - val text_input: Input.source parser - val text: string parser val binding: binding parser val alt_name: string parser val liberal_name: string parser val var: indexname parser val internal_source: Token.src parser val internal_name: Token.name_value parser val internal_typ: typ parser val internal_term: term parser val internal_fact: thm list parser val internal_attribute: (morphism -> attribute) parser val internal_declaration: declaration parser val named_source: (Token.T -> Token.src) -> Token.src parser val named_typ: (string -> typ) -> typ parser val named_term: (string -> term) -> term parser val named_fact: (string -> string option * thm list) -> thm list parser val named_attribute: (string * Position.T -> morphism -> attribute) -> (morphism -> attribute) parser - val text_declaration: (Input.source -> declaration) -> declaration parser - val cartouche_declaration: (Input.source -> declaration) -> declaration parser + val embedded_declaration: (Input.source -> declaration) -> declaration parser val typ_abbrev: typ context_parser val typ: typ context_parser val term: term context_parser val term_pattern: term context_parser val term_abbrev: term context_parser val prop: term context_parser val type_name: {proper: bool, strict: bool} -> string context_parser val const: {proper: bool, strict: bool} -> string context_parser val goal_spec: ((int -> tactic) -> tactic) context_parser end; structure Args: ARGS = struct (** argument scanners **) (* context *) fun context x = (Scan.state >> Context.proof_of) x; fun theory x = (Scan.state >> Context.theory_of) x; (* basic *) val ident = Parse.token (Parse.short_ident || Parse.long_ident || Parse.sym_ident || Parse.term_var || Parse.type_ident || Parse.type_var || Parse.number); val string 
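Byte_Reader above deliberately feeds raw bytes to the parsers as 1:1 chars, so scanning needs neither up-front UTF-8 decoding nor an expensive length computation; only the substrings a parser actually returns are decoded later via reader_decode_utf8. The byte-as-char round trip at its core:

import java.nio.charset.StandardCharsets

object ByteCharSketch {
  def main(args: Array[String]): Unit = {
    val bytes = "Läuft".getBytes(StandardCharsets.UTF_8)
    // 1:1 byte-to-char view, as the scanner sees it (ISO-8859-1 is lossless per byte)
    val raw = new String(bytes, StandardCharsets.ISO_8859_1)
    assert(raw.length == bytes.length) // length is the byte count, not the codepoint count
    // decode only the final matched text back to proper UTF-8
    val decoded = new String(raw.getBytes(StandardCharsets.ISO_8859_1), StandardCharsets.UTF_8)
    assert(decoded == "Läuft")
    println(decoded)
  }
}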
= Parse.token Parse.string; val alt_string = Parse.token (Parse.alt_string || Parse.cartouche); val symbolic = Parse.token (Parse.keyword_with Token.ident_or_symbolic); fun $$$ x = (ident || Parse.token Parse.keyword) :|-- (fn tok => let val y = Token.content_of tok in if x = y then (Token.assign (SOME (Token.Literal (false, Markup.quasi_keyword))) tok; Scan.succeed x) else Scan.fail end); val add = $$$ "add"; val del = $$$ "del"; val colon = $$$ ":"; val query = $$$ "?"; val bang = $$$ "!"; val query_colon = $$$ "?" ^^ $$$ ":"; val bang_colon = $$$ "!" ^^ $$$ ":"; fun parens scan = $$$ "(" |-- scan --| $$$ ")"; fun bracks scan = $$$ "[" |-- scan --| $$$ "]"; fun mode s = Scan.optional (parens ($$$ s) >> K true) false; fun maybe scan = $$$ "_" >> K NONE || scan >> SOME; val name_token = ident || string; val name = name_token >> Token.content_of; val name_position = name_token >> (Input.source_content o Token.input_of); val cartouche = Parse.token Parse.cartouche; val cartouche_inner_syntax = cartouche >> Token.inner_syntax_of; val cartouche_input = cartouche >> Token.input_of; -val text_token = Parse.token (Parse.embedded || Parse.verbatim); -val text_input = text_token >> Token.input_of; -val text = text_token >> Token.content_of; - val binding = Parse.input name >> (Binding.make o Input.source_content); val alt_name = alt_string >> Token.content_of; val liberal_name = (symbolic >> Token.content_of) || name; val var = (ident >> Token.content_of) :|-- (fn x => (case Lexicon.read_variable x of SOME v => Scan.succeed v | NONE => Scan.fail)); (* values *) fun value dest = Scan.some (fn arg => (case Token.get_value arg of SOME v => (SOME (dest v) handle Match => NONE) | NONE => NONE)); val internal_source = value (fn Token.Source src => src); val internal_name = value (fn Token.Name (a, _) => a); val internal_typ = value (fn Token.Typ T => T); val internal_term = value (fn Token.Term t => t); val internal_fact = value (fn Token.Fact (_, ths) => ths); val internal_attribute = value (fn Token.Attribute att => att); val internal_declaration = value (fn Token.Declaration decl => decl); fun named_source read = internal_source || name_token >> Token.evaluate Token.Source read; fun named_typ read = internal_typ || Parse.token Parse.embedded >> Token.evaluate Token.Typ (read o Token.inner_syntax_of); fun named_term read = internal_term || Parse.token Parse.embedded >> Token.evaluate Token.Term (read o Token.inner_syntax_of); fun named_fact get = internal_fact || name_token >> Token.evaluate Token.Fact (get o Token.content_of) >> #2 || alt_string >> Token.evaluate Token.Fact (get o Token.inner_syntax_of) >> #2; fun named_attribute att = internal_attribute || name_token >> Token.evaluate Token.Attribute (fn tok => att (Token.content_of tok, Token.pos_of tok)); -fun text_declaration read = - internal_declaration || text_token >> Token.evaluate Token.Declaration (read o Token.input_of); - -fun cartouche_declaration read = - internal_declaration || cartouche >> Token.evaluate Token.Declaration (read o Token.input_of); +fun embedded_declaration read = + internal_declaration || + Parse.token Parse.embedded >> Token.evaluate Token.Declaration (read o Token.input_of); (* terms and types *) val typ_abbrev = Scan.peek (named_typ o Proof_Context.read_typ_abbrev o Context.proof_of); val typ = Scan.peek (named_typ o Syntax.read_typ o Context.proof_of); val term = Scan.peek (named_term o Syntax.read_term o Context.proof_of); val term_pattern = Scan.peek (named_term o Proof_Context.read_term_pattern o 
Context.proof_of); val term_abbrev = Scan.peek (named_term o Proof_Context.read_term_abbrev o Context.proof_of); val prop = Scan.peek (named_term o Syntax.read_prop o Context.proof_of); (* type and constant names *) fun type_name flags = Scan.peek (named_typ o Proof_Context.read_type_name flags o Context.proof_of) >> (fn Type (c, _) => c | TFree (a, _) => a | _ => ""); fun const flags = Scan.peek (named_term o Proof_Context.read_const flags o Context.proof_of) >> (fn Const (c, _) => c | Free (x, _) => x | _ => ""); (* improper method arguments *) val from_to = Parse.nat -- ($$$ "-" |-- Parse.nat) >> (fn (i, j) => fn tac => Seq.INTERVAL tac i j) || Parse.nat --| $$$ "-" >> (fn i => fn tac => fn st => Seq.INTERVAL tac i (Thm.nprems_of st) st) || Parse.nat >> (fn i => fn tac => tac i) || $$$ "!" >> K ALLGOALS; val goal = Parse.keyword_improper "[" |-- Parse.!!! (from_to --| Parse.keyword_improper "]"); fun goal_spec x = Scan.lift (Scan.optional goal (fn tac => tac 1)) x; end; diff --git a/src/Pure/Isar/method.ML b/src/Pure/Isar/method.ML --- a/src/Pure/Isar/method.ML +++ b/src/Pure/Isar/method.ML @@ -1,830 +1,830 @@ (* Title: Pure/Isar/method.ML Author: Markus Wenzel, TU Muenchen Isar proof methods. *) signature METHOD = sig type method = thm list -> context_tactic val CONTEXT_METHOD: (thm list -> context_tactic) -> method val METHOD: (thm list -> tactic) -> method val fail: method val succeed: method val insert_tac: Proof.context -> thm list -> int -> tactic val insert: thm list -> method val SIMPLE_METHOD: tactic -> method val SIMPLE_METHOD': (int -> tactic) -> method val SIMPLE_METHOD'': ((int -> tactic) -> tactic) -> (int -> tactic) -> method val goal_cases_tac: string list -> context_tactic val cheating: bool -> method val intro: Proof.context -> thm list -> method val elim: Proof.context -> thm list -> method val unfold: thm list -> Proof.context -> method val fold: thm list -> Proof.context -> method val atomize: bool -> Proof.context -> method val this: Proof.context -> method val fact: thm list -> Proof.context -> method val assm_tac: Proof.context -> int -> tactic val all_assm_tac: Proof.context -> tactic val assumption: Proof.context -> method val rule_trace: bool Config.T val trace: Proof.context -> thm list -> unit val rule_tac: Proof.context -> thm list -> thm list -> int -> tactic val some_rule_tac: Proof.context -> thm list -> thm list -> int -> tactic val intros_tac: Proof.context -> thm list -> thm list -> tactic val try_intros_tac: Proof.context -> thm list -> thm list -> tactic val rule: Proof.context -> thm list -> method val erule: Proof.context -> int -> thm list -> method val drule: Proof.context -> int -> thm list -> method val frule: Proof.context -> int -> thm list -> method val method_space: Context.generic -> Name_Space.T val set_tactic: (morphism -> thm list -> tactic) -> Context.generic -> Context.generic val clean_facts: thm list -> thm list val set_facts: thm list -> Proof.context -> Proof.context val get_facts: Proof.context -> thm list type combinator_info val no_combinator_info: combinator_info datatype combinator = Then | Then_All_New | Orelse | Try | Repeat1 | Select_Goals of int datatype text = Source of Token.src | Basic of Proof.context -> method | Combinator of combinator_info * combinator * text list val map_source: (Token.src -> Token.src) -> text -> text val primitive_text: (Proof.context -> thm -> thm) -> text val succeed_text: text val standard_text: text val this_text: text val done_text: text val sorry_text: bool -> text val finish_text: text 
option * bool -> text val print_methods: bool -> Proof.context -> unit val check_name: Proof.context -> xstring * Position.T -> string val check_src: Proof.context -> Token.src -> Token.src val check_text: Proof.context -> text -> text val checked_text: text -> bool val method_syntax: (Proof.context -> method) context_parser -> Token.src -> Proof.context -> method val setup: binding -> (Proof.context -> method) context_parser -> string -> theory -> theory val local_setup: binding -> (Proof.context -> method) context_parser -> string -> local_theory -> local_theory val method_setup: bstring * Position.T -> Input.source -> string -> local_theory -> local_theory val method: Proof.context -> Token.src -> Proof.context -> method val method_closure: Proof.context -> Token.src -> Token.src val closure: bool Config.T val method_cmd: Proof.context -> Token.src -> Proof.context -> method val detect_closure_state: thm -> bool val STATIC: (unit -> unit) -> context_tactic val RUNTIME: context_tactic -> context_tactic val sleep: Time.time -> context_tactic val evaluate: text -> Proof.context -> method val evaluate_runtime: text -> Proof.context -> method type modifier = {init: Proof.context -> Proof.context, attribute: attribute, pos: Position.T} val modifier: attribute -> Position.T -> modifier val old_section_parser: bool Config.T val sections: modifier parser list -> unit context_parser type text_range = text * Position.range val text: text_range option -> text option val position: text_range option -> Position.T val reports_of: text_range -> Position.report list val report: text_range -> unit val parser: int -> text_range parser val parse: text_range parser val parse_by: ((text_range * text_range option) * Position.report list) parser val read: Proof.context -> Token.src -> text val read_closure: Proof.context -> Token.src -> text * Token.src val read_closure_input: Proof.context -> Input.source -> text * Token.src val text_closure: text context_parser end; structure Method: METHOD = struct (** proof methods **) (* type method *) type method = thm list -> context_tactic; fun CONTEXT_METHOD tac : method = fn facts => CONTEXT_TACTIC (ALLGOALS Goal.conjunction_tac) #> Seq.maps_results (tac facts); fun METHOD tac : method = fn facts => CONTEXT_TACTIC (ALLGOALS Goal.conjunction_tac THEN tac facts); val fail = METHOD (K no_tac); val succeed = METHOD (K all_tac); (* insert facts *) fun insert_tac _ [] _ = all_tac | insert_tac ctxt facts i = EVERY (map (fn r => resolve_tac ctxt [Thm.forall_intr_vars r COMP_INCR revcut_rl] i) facts); fun insert thms = CONTEXT_METHOD (fn _ => fn (ctxt, st) => st |> ALLGOALS (insert_tac ctxt thms) |> TACTIC_CONTEXT ctxt); fun SIMPLE_METHOD tac = CONTEXT_METHOD (fn facts => fn (ctxt, st) => st |> (ALLGOALS (insert_tac ctxt facts) THEN tac) |> TACTIC_CONTEXT ctxt); fun SIMPLE_METHOD'' quant tac = CONTEXT_METHOD (fn facts => fn (ctxt, st) => st |> quant (insert_tac ctxt facts THEN' tac) |> TACTIC_CONTEXT ctxt); val SIMPLE_METHOD' = SIMPLE_METHOD'' HEADGOAL; (* goals as cases *) fun goal_cases_tac case_names : context_tactic = fn (ctxt, st) => let val cases = (if null case_names then map string_of_int (1 upto Thm.nprems_of st) else case_names) |> map (rpair [] o rpair []) |> Rule_Cases.make_common ctxt (Thm.prop_of (Rule_Cases.internalize_params st)); in CONTEXT_CASES cases all_tac (ctxt, st) end; (* cheating *) fun cheating int = CONTEXT_METHOD (fn _ => fn (ctxt, st) => if int orelse Config.get ctxt quick_and_dirty then TACTIC_CONTEXT ctxt (ALLGOALS (Skip_Proof.cheat_tac ctxt) 
st) else error "Cheating requires quick_and_dirty mode!"); (* unfold intro/elim rules *) fun intro ctxt ths = SIMPLE_METHOD' (CHANGED_PROP o REPEAT_ALL_NEW (match_tac ctxt ths)); fun elim ctxt ths = SIMPLE_METHOD' (CHANGED_PROP o REPEAT_ALL_NEW (ematch_tac ctxt ths)); (* unfold/fold definitions *) fun unfold_meth ths ctxt = SIMPLE_METHOD (CHANGED_PROP (Local_Defs.unfold_tac ctxt ths)); fun fold_meth ths ctxt = SIMPLE_METHOD (CHANGED_PROP (Local_Defs.fold_tac ctxt ths)); (* atomize rule statements *) fun atomize false ctxt = SIMPLE_METHOD' (CHANGED_PROP o Object_Logic.atomize_prems_tac ctxt) | atomize true ctxt = Context_Tactic.CONTEXT_TACTIC o K (HEADGOAL (CHANGED_PROP o Object_Logic.full_atomize_tac ctxt)); (* this -- resolve facts directly *) fun this ctxt = METHOD (EVERY o map (HEADGOAL o resolve_tac ctxt o single)); (* fact -- composition by facts from context *) fun fact [] ctxt = SIMPLE_METHOD' (Proof_Context.some_fact_tac ctxt) | fact rules ctxt = SIMPLE_METHOD' (Proof_Context.fact_tac ctxt rules); (* assumption *) local fun cond_rtac ctxt cond rule = SUBGOAL (fn (prop, i) => if cond (Logic.strip_assums_concl prop) then resolve_tac ctxt [rule] i else no_tac); in fun assm_tac ctxt = assume_tac ctxt APPEND' Goal.assume_rule_tac ctxt APPEND' cond_rtac ctxt (can Logic.dest_equals) Drule.reflexive_thm APPEND' cond_rtac ctxt (can Logic.dest_term) Drule.termI; fun all_assm_tac ctxt = let fun tac i st = if i > Thm.nprems_of st then all_tac st else ((assm_tac ctxt i THEN tac i) ORELSE tac (i + 1)) st; in tac 1 end; fun assumption ctxt = METHOD (HEADGOAL o (fn [] => assm_tac ctxt | [fact] => solve_tac ctxt [fact] | _ => K no_tac)); fun finish immed ctxt = METHOD (K ((if immed then all_assm_tac ctxt else all_tac) THEN flexflex_tac ctxt)); end; (* rule etc. 
-- single-step refinements *) val rule_trace = Attrib.setup_config_bool \<^binding>\<open>rule_trace\<close> (fn _ => false); fun trace ctxt rules = if Config.get ctxt rule_trace andalso not (null rules) then Pretty.big_list "rules:" (map (Thm.pretty_thm_item ctxt) rules) |> Pretty.string_of |> tracing else (); local fun gen_rule_tac tac ctxt rules facts = (fn i => fn st => if null facts then tac ctxt rules i st else Seq.maps (fn rule => (tac ctxt o single) rule i st) (Drule.multi_resolves (SOME ctxt) facts rules)) THEN_ALL_NEW Goal.norm_hhf_tac ctxt; fun gen_arule_tac tac ctxt j rules facts = EVERY' (gen_rule_tac tac ctxt rules facts :: replicate j (assume_tac ctxt)); fun gen_some_rule_tac tac ctxt arg_rules facts = SUBGOAL (fn (goal, i) => let val rules = if not (null arg_rules) then arg_rules else flat (Context_Rules.find_rules ctxt false facts goal); in trace ctxt rules; tac ctxt rules facts i end); fun meth tac x y = METHOD (HEADGOAL o tac x y); fun meth' tac x y z = METHOD (HEADGOAL o tac x y z); in val rule_tac = gen_rule_tac resolve_tac; val rule = meth rule_tac; val some_rule_tac = gen_some_rule_tac rule_tac; val some_rule = meth some_rule_tac; val erule = meth' (gen_arule_tac eresolve_tac); val drule = meth' (gen_arule_tac dresolve_tac); val frule = meth' (gen_arule_tac forward_tac); end; (* intros_tac -- pervasive search spanned by intro rules *) fun gen_intros_tac goals ctxt intros facts = goals (insert_tac ctxt facts THEN' REPEAT_ALL_NEW (resolve_tac ctxt intros)) THEN Tactic.distinct_subgoals_tac; val intros_tac = gen_intros_tac ALLGOALS; val try_intros_tac = gen_intros_tac TRYALL; (** method syntax **) (* context data *) structure Data = Generic_Data ( type T = {methods: ((Token.src -> Proof.context -> method) * string) Name_Space.table, ml_tactic: (morphism -> thm list -> tactic) option, facts: thm list option}; val empty : T = {methods = Name_Space.empty_table Markup.methodN, ml_tactic = NONE, facts = NONE}; fun merge ({methods = methods1, ml_tactic = ml_tactic1, facts = facts1}, {methods = methods2, ml_tactic = ml_tactic2, facts = facts2}) : T = {methods = Name_Space.merge_tables (methods1, methods2), ml_tactic = merge_options (ml_tactic1, ml_tactic2), facts = merge_options (facts1, facts2)}; ); fun map_data f = Data.map (fn {methods, ml_tactic, facts} => let val (methods', ml_tactic', facts') = f (methods, ml_tactic, facts) in {methods = methods', ml_tactic = ml_tactic', facts = facts'} end); val get_methods = #methods o Data.get; val ops_methods = {get_data = get_methods, put_data = fn methods => map_data (fn (_, ml_tactic, facts) => (methods, ml_tactic, facts))}; val method_space = Name_Space.space_of_table o get_methods; (* ML tactic *) fun set_tactic ml_tactic = map_data (fn (methods, _, facts) => (methods, SOME ml_tactic, facts)); fun the_tactic context = (case #ml_tactic (Data.get context) of SOME tac => tac | NONE => raise Fail "Undefined ML tactic"); val parse_tactic = Scan.state :|-- (fn context => - Scan.lift (Args.text_declaration (fn source => + Scan.lift (Args.embedded_declaration (fn source => let val tac = context |> ML_Context.expression (Input.pos_of source) (ML_Lex.read "Context.>> (Method.set_tactic (fn morphism: Morphism.morphism => fn facts: thm list => (" @ ML_Lex.read_source source @ ML_Lex.read ")))") |> the_tactic; in fn phi => set_tactic (fn _ => Context.setmp_generic_context (SOME context) (tac phi)) end)) >> (fn decl => Morphism.form (the_tactic (Morphism.form decl context)))); (* method facts *) val clean_facts = filter_out Thm.is_dummy; fun set_facts
(* method facts *)

val clean_facts = filter_out Thm.is_dummy;

fun set_facts facts =
  (Context.proof_map o map_data)
    (fn (methods, ml_tactic, _) => (methods, ml_tactic, SOME (clean_facts facts)));

val get_facts_generic = these o #facts o Data.get;
val get_facts = get_facts_generic o Context.Proof;

val _ =
  Theory.setup
    (Global_Theory.add_thms_dynamic (Binding.make ("method_facts", \<^here>), get_facts_generic));


(* method text *)

datatype combinator_info = Combinator_Info of {keywords: Position.T list};
fun combinator_info keywords = Combinator_Info {keywords = keywords};
val no_combinator_info = combinator_info [];

datatype combinator = Then | Then_All_New | Orelse | Try | Repeat1 | Select_Goals of int;

datatype text =
  Source of Token.src |
  Basic of Proof.context -> method |
  Combinator of combinator_info * combinator * text list;

fun map_source f (Source src) = Source (f src)
  | map_source _ (Basic meth) = Basic meth
  | map_source f (Combinator (info, comb, txts)) =
      Combinator (info, comb, map (map_source f) txts);

fun primitive_text r = Basic (SIMPLE_METHOD o PRIMITIVE o r);
val succeed_text = Basic (K succeed);
val standard_text = Source (Token.make_src ("standard", Position.none) []);
val this_text = Basic this;
val done_text = Basic (K (SIMPLE_METHOD all_tac));
fun sorry_text int = Basic (fn _ => cheating int);

fun finish_text (NONE, immed) = Basic (finish immed)
  | finish_text (SOME txt, immed) =
      Combinator (no_combinator_info, Then, [txt, Basic (finish immed)]);


(* method definitions *)

fun print_methods verbose ctxt =
  let
    val meths = get_methods (Context.Proof ctxt);
    fun prt_meth (name, (_, "")) = Pretty.mark_str name
      | prt_meth (name, (_, comment)) =
          Pretty.block
            (Pretty.mark_str name :: Pretty.str ":" :: Pretty.brk 2 :: Pretty.text comment);
  in
    [Pretty.big_list "methods:" (map prt_meth (Name_Space.markup_table verbose ctxt meths))]
    |> Pretty.writeln_chunks
  end;


(* define *)

fun define_global binding meth comment = Entity.define_global ops_methods binding (meth, comment);
fun define binding meth comment = Entity.define ops_methods binding (meth, comment);


(* check *)

fun check_name ctxt =
  let val context = Context.Proof ctxt
  in #1 o Name_Space.check context (get_methods context) end;

fun check_src ctxt = #1 o Token.check_src ctxt (get_methods o Context.Proof);

fun check_text ctxt (Source src) = Source (check_src ctxt src)
  | check_text _ (Basic m) = Basic m
  | check_text ctxt (Combinator (x, y, body)) = Combinator (x, y, map (check_text ctxt) body);

fun checked_text (Source src) = Token.checked_src src
  | checked_text (Basic _) = true
  | checked_text (Combinator (_, _, body)) = forall checked_text body;

val _ = Theory.setup
  (ML_Antiquotation.inline_embedded \<^binding>\<open>method\<close>
    (Args.context -- Scan.lift Parse.embedded_position
      >> (ML_Syntax.print_string o uncurry check_name)));


(* method setup *)

fun method_syntax scan src ctxt : method =
  let val (m, ctxt') = Token.syntax scan src ctxt in m ctxt' end;

fun setup binding scan comment = define_global binding (method_syntax scan) comment #> snd;
fun local_setup binding scan comment = define binding (method_syntax scan) comment #> snd;

fun method_setup binding source comment =
  ML_Context.expression (Input.pos_of source)
    (ML_Lex.read
      ("Theory.local_setup (Method.local_setup (" ^ ML_Syntax.make_binding binding ^ ") (") @
     ML_Lex.read_source source @ ML_Lex.read (")" ^ ML_Syntax.print_string comment ^ ")"))
  |> Context.proof_map;


(* prepare methods *)

fun method ctxt =
  let val table = get_methods (Context.Proof ctxt)
  in fn src => #1 (Name_Space.get table (#1 (Token.name_of_src src))) src end;

fun method_closure ctxt src =
let val src' = map Token.init_assignable src; val ctxt' = Context_Position.not_really ctxt; val _ = Seq.pull (method ctxt' src' ctxt' [] (ctxt', Goal.protect 0 Drule.dummy_thm)); in map Token.closure src' end; val closure = Config.declare_bool ("Method.closure", \<^here>) (K true); fun method_cmd ctxt = check_src ctxt #> Config.get ctxt closure ? method_closure ctxt #> method ctxt; (* static vs. runtime state *) fun detect_closure_state st = (case try Logic.dest_term (Thm.concl_of (perhaps (try Goal.conclude) st)) of NONE => false | SOME t => Term.is_dummy_pattern t); fun STATIC test : context_tactic = fn (ctxt, st) => if detect_closure_state st then (test (); Seq.single (Seq.Result (ctxt, st))) else Seq.empty; fun RUNTIME (tac: context_tactic) (ctxt, st) = if detect_closure_state st then Seq.empty else tac (ctxt, st); fun sleep t = RUNTIME (fn ctxt_st => (OS.Process.sleep t; Seq.single (Seq.Result ctxt_st))); (* evaluate method text *) local val op THEN = Seq.THEN; fun BYPASS_CONTEXT (tac: tactic) = fn result => (case result of Seq.Error _ => Seq.single result | Seq.Result (ctxt, st) => tac st |> TACTIC_CONTEXT ctxt); val preparation = BYPASS_CONTEXT (ALLGOALS Goal.conjunction_tac); fun RESTRICT_GOAL i n method = BYPASS_CONTEXT (PRIMITIVE (Goal.restrict i n)) THEN method THEN BYPASS_CONTEXT (PRIMITIVE (Goal.unrestrict i)); fun SELECT_GOAL method i = RESTRICT_GOAL i 1 method; fun (method1 THEN_ALL_NEW method2) i (result : context_state Seq.result) = (case result of Seq.Error _ => Seq.single result | Seq.Result (_, st) => result |> method1 i |> Seq.maps (fn result' => (case result' of Seq.Error _ => Seq.single result' | Seq.Result (_, st') => result' |> Seq.INTERVAL method2 i (i + Thm.nprems_of st' - Thm.nprems_of st)))) fun COMBINATOR1 comb [meth] = comb meth | COMBINATOR1 _ _ = raise Fail "Method combinator requires exactly one argument"; fun combinator Then = Seq.EVERY | combinator Then_All_New = (fn [] => Seq.single | methods => preparation THEN (foldl1 (op THEN_ALL_NEW) (map SELECT_GOAL methods) 1)) | combinator Orelse = Seq.FIRST | combinator Try = COMBINATOR1 Seq.TRY | combinator Repeat1 = COMBINATOR1 Seq.REPEAT1 | combinator (Select_Goals n) = COMBINATOR1 (fn method => preparation THEN RESTRICT_GOAL 1 n method); in fun evaluate text ctxt0 facts = let val ctxt = set_facts facts ctxt0; fun eval0 m = Seq.single #> Seq.maps_results (m facts); fun eval (Basic m) = eval0 (m ctxt) | eval (Source src) = eval0 (method_cmd ctxt src ctxt) | eval (Combinator (_, c, txts)) = combinator c (map eval txts); in eval text o Seq.Result end; end; fun evaluate_runtime text ctxt = let val text' = text |> (map_source o map o Token.map_facts) (fn SOME name => (case Proof_Context.lookup_fact ctxt name of SOME {dynamic = true, thms} => K thms | _ => I) | NONE => I); val ctxt' = Config.put closure false ctxt; in fn facts => RUNTIME (fn st => evaluate text' ctxt' facts st) end; (** concrete syntax **) (* type modifier *) type modifier = {init: Proof.context -> Proof.context, attribute: attribute, pos: Position.T}; fun modifier attribute pos : modifier = {init = I, attribute = attribute, pos = pos}; (* sections *) val old_section_parser = Config.declare_bool ("Method.old_section_parser", \<^here>) (K false); local fun thms ss = Scan.repeats (Scan.unless (Scan.lift (Scan.first ss)) Attrib.multi_thm); fun app {init, attribute, pos = _} ths context = fold_map (Thm.apply_attribute attribute) ths (Context.map_proof init context); fun section ss = Scan.depend (fn context => (Scan.first ss -- Scan.pass context (thms 
ss)) :|-- (fn (m, ths) => Scan.succeed (swap (app m ths context)))); in fun old_sections ss = Scan.repeat (section ss) >> K (); end; local fun sect (modifier : modifier parser) = Scan.depend (fn context => Scan.ahead Parse.not_eof -- Scan.trace modifier -- Scan.repeat (Scan.unless modifier Parse.thm) >> (fn ((tok0, ({init, attribute, pos}, modifier_toks)), xthms) => let val decl = (case Token.get_value tok0 of SOME (Token.Declaration decl) => decl | _ => let val ctxt = Context.proof_of context; val prep_att = Attrib.check_src ctxt #> map (Token.assign NONE); val thms = map (fn (a, bs) => (Proof_Context.get_fact ctxt a, map prep_att bs)) xthms; val facts = Attrib.partial_evaluation ctxt [((Binding.name "dummy", []), thms)] |> map (fn (_, bs) => ((Binding.empty, [Attrib.internal (K attribute)]), bs)); fun decl phi = Context.mapping I init #> Attrib.generic_notes "" (Attrib.transform_facts phi facts) #> snd; val modifier_report = (#1 (Token.range_of modifier_toks), Position.entity_markup Markup.method_modifierN ("", pos)); val _ = Context_Position.reports ctxt (modifier_report :: Token.reports_of_value tok0); val _ = Token.assign (SOME (Token.Declaration decl)) tok0; in decl end); in (Morphism.form decl context, decl) end)); in fun sections ss = Args.context :|-- (fn ctxt => if Config.get ctxt old_section_parser then old_sections ss else Scan.repeat (sect (Scan.first ss)) >> K ()); end; (* extra rule methods *) fun xrule_meth meth = Scan.lift (Scan.optional (Args.parens Parse.nat) 0) -- Attrib.thms >> (fn (n, ths) => fn ctxt => meth ctxt n ths); (* text range *) type text_range = text * Position.range; fun text NONE = NONE | text (SOME (txt, _)) = SOME txt; fun position NONE = Position.none | position (SOME (_, (pos, _))) = pos; (* reports *) local fun keyword_positions (Source _) = [] | keyword_positions (Basic _) = [] | keyword_positions (Combinator (Combinator_Info {keywords}, _, texts)) = keywords @ maps keyword_positions texts; in fun reports_of ((text, (pos, _)): text_range) = (pos, Markup.language_method) :: maps (fn p => map (pair p) (Markup.keyword3 :: Completion.suppress_abbrevs "")) (keyword_positions text); fun report text_range = if Context_Position.pide_reports () then Position.reports (reports_of text_range) else (); end; (* parser *) local fun is_symid_meth s = s <> "|" andalso s <> "?" andalso s <> "+" andalso Token.ident_or_symbolic s; in fun parser pri = let val meth_name = Parse.token Parse.name; fun meth5 x = (meth_name >> (Source o single) || Scan.ahead Parse.cartouche |-- Parse.not_eof >> (fn tok => Source (Token.make_src ("cartouche", Position.none) [tok])) || Parse.$$$ "(" |-- Parse.!!! 
(meth0 --| Parse.$$$ ")")) x
      and meth4 x =
        (meth5 -- Parse.position (Parse.$$$ "?")
            >> (fn (m, (_, pos)) => Combinator (combinator_info [pos], Try, [m])) ||
          meth5 -- Parse.position (Parse.$$$ "+")
            >> (fn (m, (_, pos)) => Combinator (combinator_info [pos], Repeat1, [m])) ||
          meth5 --
            (Parse.position (Parse.$$$ "[") -- Scan.optional Parse.nat 1 --
              Parse.position (Parse.$$$ "]"))
            >> (fn (m, (((_, pos1), n), (_, pos2))) =>
                Combinator (combinator_info [pos1, pos2], Select_Goals n, [m])) ||
          meth5) x
      and meth3 x =
        (meth_name ::: Parse.args1 is_symid_meth >> Source ||
          meth4) x
      and meth2 x =
        (Parse.enum1_positions "," meth3
          >> (fn ([m], _) => m | (ms, ps) => Combinator (combinator_info ps, Then, ms))) x
      and meth1 x =
        (Parse.enum1_positions ";" meth2
          >> (fn ([m], _) => m | (ms, ps) => Combinator (combinator_info ps, Then_All_New, ms))) x
      and meth0 x =
        (Parse.enum1_positions "|" meth1
          >> (fn ([m], _) => m | (ms, ps) => Combinator (combinator_info ps, Orelse, ms))) x;

      val meth =
        nth [meth0, meth1, meth2, meth3, meth4, meth5] pri
          handle General.Subscript =>
            raise Fail ("Bad method parser priority " ^ string_of_int pri);
    in Scan.trace meth >> (fn (m, toks) => (m, Token.range_of toks)) end;

val parse = parser 4;

end;

val parse_by =
  Parse.$$$ "by" |-- parse -- Scan.option parse
    >> (fn (m1, m2) => ((m1, m2), maps reports_of (m1 :: the_list m2)));


(* read method text *)

fun read ctxt src =
  (case Scan.read Token.stopper (Parse.!!! (parser 0 --| Scan.ahead Parse.eof)) src of
    SOME (text, range) =>
      if checked_text text then text
      else (report (text, range); check_text ctxt text)
  | NONE => error ("Failed to parse method" ^ Position.here (#1 (Token.range_of src))));

fun read_closure ctxt src0 =
  let
    val src1 = map Token.init_assignable src0;
    val text = read ctxt src1 |> map_source (method_closure ctxt);
    val src2 = map Token.closure src1;
  in (text, src2) end;

fun read_closure_input ctxt =
  let val keywords = Keyword.no_major_keywords (Thy_Header.get_keywords' ctxt)
  in Parse.read_embedded ctxt keywords (Scan.many Token.not_eof) #> read_closure ctxt end;

val text_closure =
-  Args.context -- Scan.lift (Parse.token Parse.text) >> (fn (ctxt, tok) =>
+  Args.context -- Scan.lift (Parse.token Parse.embedded) >> (fn (ctxt, tok) =>
    (case Token.get_value tok of
      SOME (Token.Source src) => read ctxt src
    | _ =>
        let
          val (text, src) = read_closure_input ctxt (Token.input_of tok);
          val _ = Token.assign (SOME (Token.Source src)) tok;
        in text end));
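(*Editorial illustration, not part of the patched sources: text_closure above
  parses a method expression given as embedded content; it backs the
  "subproofs" and "use" methods set up below. In the same spirit, a
  theory-level method_setup now takes its ML body as a cartouche
  (hypothetical name, assuming HOL's simplifier):

    method_setup my_simp =
      \<open>Attrib.thms >> (fn ths => fn ctxt =>
        SIMPLE_METHOD' (asm_full_simp_tac (ctxt addsimps ths)))\<close>
      "simplify first subgoal with the given rules"
*)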
(* theory setup *)

val _ = Theory.setup
 (setup \<^binding>\<open>fail\<close> (Scan.succeed (K fail)) "force failure" #>
  setup \<^binding>\<open>succeed\<close> (Scan.succeed (K succeed)) "succeed" #>
  setup \<^binding>\<open>sleep\<close> (Scan.lift Parse.real >> (fn s => fn _ => fn _ => sleep (seconds s)))
    "succeed after delay (in seconds)" #>
  setup \<^binding>\<open>-\<close> (Scan.succeed (K (SIMPLE_METHOD all_tac)))
    "insert current facts, nothing else" #>
  setup \<^binding>\<open>goal_cases\<close> (Scan.lift (Scan.repeat Args.name_token) >> (fn names => fn _ =>
    CONTEXT_METHOD (fn _ => fn (ctxt, st) =>
      (case drop (Thm.nprems_of st) names of
        [] => NONE
      | bad =>
          if detect_closure_state st then NONE
          else
            SOME (fn () =>
              ("Excessive case name(s): " ^ commas_quote (map Token.content_of bad) ^
                Position.here (#1 (Token.range_of bad)))))
      |> (fn SOME msg => Seq.single (Seq.Error msg)
           | NONE => goal_cases_tac (map Token.content_of names) (ctxt, st)))))
    "bind cases for goals" #>
  setup \<^binding>\<open>subproofs\<close> (text_closure >> (Context_Tactic.SUBPROOFS ooo evaluate_runtime))
    "apply proof method to subproofs with closed derivation" #>
  setup \<^binding>\<open>insert\<close> (Attrib.thms >> (K o insert))
    "insert theorems, ignoring facts" #>
  setup \<^binding>\<open>intro\<close> (Attrib.thms >> (fn ths => fn ctxt => intro ctxt ths))
    "repeatedly apply introduction rules" #>
  setup \<^binding>\<open>elim\<close> (Attrib.thms >> (fn ths => fn ctxt => elim ctxt ths))
    "repeatedly apply elimination rules" #>
  setup \<^binding>\<open>unfold\<close> (Attrib.thms >> unfold_meth) "unfold definitions" #>
  setup \<^binding>\<open>fold\<close> (Attrib.thms >> fold_meth) "fold definitions" #>
  setup \<^binding>\<open>atomize\<close> (Scan.lift (Args.mode "full") >> atomize)
    "present local premises as object-level statements" #>
  setup \<^binding>\<open>rule\<close> (Attrib.thms >> (fn ths => fn ctxt => some_rule ctxt ths))
    "apply some intro/elim rule" #>
  setup \<^binding>\<open>erule\<close> (xrule_meth erule) "apply rule in elimination manner (improper)" #>
  setup \<^binding>\<open>drule\<close> (xrule_meth drule) "apply rule in destruct manner (improper)" #>
  setup \<^binding>\<open>frule\<close> (xrule_meth frule) "apply rule in forward manner (improper)" #>
  setup \<^binding>\<open>this\<close> (Scan.succeed this) "apply current facts as rules" #>
  setup \<^binding>\<open>fact\<close> (Attrib.thms >> fact) "composition by facts from context" #>
  setup \<^binding>\<open>assumption\<close> (Scan.succeed assumption)
    "proof by assumption, preferring facts" #>
  setup \<^binding>\<open>rename_tac\<close> (Args.goal_spec -- Scan.lift (Scan.repeat1 Args.name) >>
    (fn (quant, xs) => K (SIMPLE_METHOD'' quant (rename_tac xs))))
    "rename parameters of goal" #>
  setup \<^binding>\<open>rotate_tac\<close> (Args.goal_spec -- Scan.lift (Scan.optional Parse.int 1) >>
    (fn (quant, i) => K (SIMPLE_METHOD'' quant (rotate_tac i))))
    "rotate assumptions of goal" #>
  setup \<^binding>\<open>tactic\<close> (parse_tactic >> (K o METHOD)) "ML tactic as proof method" #>
  setup \<^binding>\<open>raw_tactic\<close>
    (parse_tactic >> (fn tac => fn _ => Context_Tactic.CONTEXT_TACTIC o tac))
    "ML tactic as raw proof method" #>
  setup \<^binding>\<open>use\<close>
    (Attrib.thms -- (Scan.lift (Parse.$$$ "in") |-- text_closure) >>
      (fn (thms, text) => fn ctxt => fn _ => evaluate_runtime text ctxt thms))
    "indicate method facts and context for method expression");


(*final declarations of this structure!*)
val unfold = unfold_meth;
val fold = fold_meth;

end;

val CONTEXT_METHOD = Method.CONTEXT_METHOD;
val METHOD = Method.METHOD;
val SIMPLE_METHOD = Method.SIMPLE_METHOD;
val SIMPLE_METHOD' = Method.SIMPLE_METHOD';
val SIMPLE_METHOD'' = Method.SIMPLE_METHOD'';
diff --git a/src/Pure/Isar/parse.ML b/src/Pure/Isar/parse.ML
--- a/src/Pure/Isar/parse.ML
+++ b/src/Pure/Isar/parse.ML
@@ -1,538 +1,530 @@
(*  Title:      Pure/Isar/parse.ML
    Author:     Markus Wenzel, TU Muenchen

Generic parsers for Isabelle/Isar outer syntax.
*)

signature PARSE =
sig
  val group: (unit -> string) -> (Token.T list -> 'a) -> Token.T list -> 'a
  val !!! : (Token.T list -> 'a) -> Token.T list -> 'a
  val !!!!
: (Token.T list -> 'a) -> Token.T list -> 'a val not_eof: Token.T parser val token: 'a parser -> Token.T parser val range: 'a parser -> ('a * Position.range) parser val position: 'a parser -> ('a * Position.T) parser val input: 'a parser -> Input.source parser val inner_syntax: 'a parser -> string parser val command: string parser val keyword: string parser val short_ident: string parser val long_ident: string parser val sym_ident: string parser val dots: string parser val minus: string parser val term_var: string parser val type_ident: string parser val type_var: string parser val number: string parser val float_number: string parser val string: string parser val string_position: (string * Position.T) parser val alt_string: string parser - val verbatim: string parser val cartouche: string parser val control: Antiquote.control parser val eof: string parser val command_name: string -> string parser val keyword_with: (string -> bool) -> string parser val keyword_markup: bool * Markup.T -> string -> string parser val keyword_improper: string -> string parser val $$$ : string -> string parser val reserved: string -> string parser val underscore: string parser val maybe: 'a parser -> 'a option parser val maybe_position: ('a * Position.T) parser -> ('a option * Position.T) parser val opt_keyword: string -> bool parser val opt_bang: bool parser val begin: string parser val opt_begin: bool parser val nat: int parser val int: int parser val real: real parser val enum_positions: string -> 'a parser -> ('a list * Position.T list) parser val enum1_positions: string -> 'a parser -> ('a list * Position.T list) parser val enum: string -> 'a parser -> 'a list parser val enum1: string -> 'a parser -> 'a list parser val and_list: 'a parser -> 'a list parser val and_list1: 'a parser -> 'a list parser val enum': string -> 'a context_parser -> 'a list context_parser val enum1': string -> 'a context_parser -> 'a list context_parser val and_list': 'a context_parser -> 'a list context_parser val and_list1': 'a context_parser -> 'a list context_parser val list: 'a parser -> 'a list parser val list1: 'a parser -> 'a list parser val name: string parser val name_range: (string * Position.range) parser val name_position: (string * Position.T) parser val binding: binding parser val embedded: string parser val embedded_inner_syntax: string parser val embedded_input: Input.source parser val embedded_position: (string * Position.T) parser - val text: string parser val path_input: Input.source parser val path: string parser val path_binding: (string * Position.T) parser val session_name: (string * Position.T) parser val theory_name: (string * Position.T) parser val liberal_name: string parser val parname: string parser val parbinding: binding parser val class: string parser val sort: string parser val type_const: string parser val arity: (string * string list * string) parser val multi_arity: (string list * string list * string) parser val type_args: string list parser val type_args_constrained: (string * string option) list parser val typ: string parser val mixfix: mixfix parser val mixfix': mixfix parser val opt_mixfix: mixfix parser val opt_mixfix': mixfix parser val syntax_mode: Syntax.mode parser val where_: string parser val const_decl: (string * string * mixfix) parser val const_binding: (binding * string * mixfix) parser val params: (binding * string option * mixfix) list parser val vars: (binding * string option * mixfix) list parser val for_fixes: (binding * string option * mixfix) list parser val ML_source: 
Input.source parser val document_source: Input.source parser val document_marker: Input.source parser val const: string parser val term: string parser val prop: string parser val literal_fact: string parser val propp: (string * string list) parser val termp: (string * string list) parser val private: Position.T parser val qualified: Position.T parser val target: (string * Position.T) parser val opt_target: (string * Position.T) option parser val args: Token.T list parser val args1: (string -> bool) -> Token.T list parser val attribs: Token.src list parser val opt_attribs: Token.src list parser val thm_sel: Facts.interval list parser val thm: (Facts.ref * Token.src list) parser val thms1: (Facts.ref * Token.src list) list parser val options: ((string * Position.T) * (string * Position.T)) list parser val embedded_ml: ML_Lex.token Antiquote.antiquote list parser - val embedded_ml_underscore: ML_Lex.token Antiquote.antiquote list parser val read_antiq: Keyword.keywords -> 'a parser -> Symbol_Pos.T list * Position.T -> 'a val read_embedded: Proof.context -> Keyword.keywords -> 'a parser -> Input.source -> 'a val read_embedded_src: Proof.context -> Keyword.keywords -> 'a parser -> Token.src -> 'a end; structure Parse: PARSE = struct (** error handling **) (* group atomic parsers (no cuts!) *) fun group s scan = scan || Scan.fail_with (fn [] => (fn () => s () ^ " expected,\nbut end-of-input was found") | tok :: _ => (fn () => (case Token.text_of tok of (txt, "") => s () ^ " expected,\nbut " ^ txt ^ Position.here (Token.pos_of tok) ^ " was found" | (txt1, txt2) => s () ^ " expected,\nbut " ^ txt1 ^ Position.here (Token.pos_of tok) ^ " was found:\n" ^ txt2))); (* cut *) fun cut kind scan = let fun get_pos [] = " (end-of-input)" | get_pos (tok :: _) = Position.here (Token.pos_of tok); fun err (toks, NONE) = (fn () => kind ^ get_pos toks) | err (toks, SOME msg) = (fn () => let val s = msg () in if String.isPrefix kind s then s else kind ^ get_pos toks ^ ": " ^ s end); in Scan.!! err scan end; fun !!! scan = cut "Outer syntax error" scan; fun !!!! 
scan = cut "Corrupted outer syntax in presentation" scan;


(** basic parsers **)

(* tokens *)

fun RESET_VALUE atom = (*required for all primitive parsers*)
  Scan.ahead (Scan.one (K true)) -- atom >> (fn (arg, x) => (Token.assign NONE arg; x));

val not_eof = RESET_VALUE (Scan.one Token.not_eof);

fun token atom = Scan.ahead not_eof --| atom;

fun range scan = (Scan.ahead not_eof >> (Token.range_of o single)) -- scan >> Library.swap;
fun position scan = (Scan.ahead not_eof >> Token.pos_of) -- scan >> Library.swap;

fun input atom = Scan.ahead atom |-- not_eof >> Token.input_of;
fun inner_syntax atom = Scan.ahead atom |-- not_eof >> Token.inner_syntax_of;

fun kind k =
  group (fn () => Token.str_of_kind k)
    (RESET_VALUE (Scan.one (Token.is_kind k) >> Token.content_of));

val command = kind Token.Command;
val keyword = kind Token.Keyword;
val short_ident = kind Token.Ident;
val long_ident = kind Token.Long_Ident;
val sym_ident = kind Token.Sym_Ident;
val term_var = kind Token.Var;
val type_ident = kind Token.Type_Ident;
val type_var = kind Token.Type_Var;
val number = kind Token.Nat;
val float_number = kind Token.Float;
val string = kind Token.String;
val alt_string = kind Token.Alt_String;
-val verbatim = kind Token.Verbatim;
val cartouche = kind Token.Cartouche;
val control = token (kind Token.control_kind) >> (the o Token.get_control);
val eof = kind Token.EOF;

fun command_name x =
  group (fn () => Token.str_of_kind Token.Command ^ " " ^ quote x)
    (RESET_VALUE (Scan.one (fn tok => Token.is_command tok andalso Token.content_of tok = x)))
  >> Token.content_of;

fun keyword_with pred = RESET_VALUE (Scan.one (Token.keyword_with pred) >> Token.content_of);

fun keyword_markup markup x =
  group (fn () => Token.str_of_kind Token.Keyword ^ " " ^ quote x)
    (Scan.ahead not_eof -- keyword_with (fn y => x = y))
  >> (fn (tok, x) => (Token.assign (SOME (Token.Literal markup)) tok; x));

val keyword_improper = keyword_markup (true, Markup.improper);
val $$$ = keyword_markup (false, Markup.quasi_keyword);

fun reserved x =
  group (fn () => "reserved identifier " ^ quote x)
    (RESET_VALUE (Scan.one (Token.ident_with (fn y => x = y)) >> Token.content_of));

val dots = sym_ident :-- (fn "\<dots>" => Scan.succeed () | _ => Scan.fail) >> #1;

val minus = sym_ident :-- (fn "-" => Scan.succeed () | _ => Scan.fail) >> #1;

val underscore = sym_ident :-- (fn "_" => Scan.succeed () | _ => Scan.fail) >> #1;
fun maybe scan = underscore >> K NONE || scan >> SOME;
fun maybe_position scan = position (underscore >> K NONE) || scan >> apfst SOME;

val nat = number >> (#1 o Library.read_int o Symbol.explode);
val int = Scan.optional (minus >> K ~1) 1 -- nat >> op *;
val real = float_number >> Value.parse_real || int >> Real.fromInt;

fun opt_keyword s = Scan.optional ($$$ "(" |-- !!! (($$$ s >> K true) --| $$$ ")")) false;
val opt_bang = Scan.optional ($$$ "!" >> K true) false;

val begin = $$$ "begin";
val opt_begin = Scan.optional (begin >> K true) false;


(* enumerations *)

fun enum1_positions sep scan =
  scan -- Scan.repeat (position ($$$ sep) -- !!! scan)
    >> (fn (x, ys) => (x :: map #2 ys, map (#2 o #1) ys));
fun enum_positions sep scan =
  enum1_positions sep scan || Scan.succeed ([], []);

fun enum1 sep scan = scan ::: Scan.repeat ($$$ sep |-- !!!
scan); fun enum sep scan = enum1 sep scan || Scan.succeed []; fun enum1' sep scan = scan ::: Scan.repeat (Scan.lift ($$$ sep) |-- scan); fun enum' sep scan = enum1' sep scan || Scan.succeed []; fun and_list1 scan = enum1 "and" scan; fun and_list scan = enum "and" scan; fun and_list1' scan = enum1' "and" scan; fun and_list' scan = enum' "and" scan; fun list1 scan = enum1 "," scan; fun list scan = enum "," scan; (* names and embedded content *) val name = group (fn () => "name") (short_ident || long_ident || sym_ident || number || string); val name_range = input name >> Input.source_content_range; val name_position = input name >> Input.source_content; val string_position = input string >> Input.source_content; val binding = name_position >> Binding.make; val embedded = group (fn () => "embedded content") (cartouche || string || short_ident || long_ident || sym_ident || term_var || type_ident || type_var || number); val embedded_inner_syntax = inner_syntax embedded; val embedded_input = input embedded; val embedded_position = embedded_input >> Input.source_content; -val text = group (fn () => "text") (embedded || verbatim); - val path_input = group (fn () => "file name/path specification") embedded_input; val path = path_input >> Input.string_of; val path_binding = group (fn () => "path binding (strict file name)") (position embedded); val session_name = group (fn () => "session name") name_position; val theory_name = group (fn () => "theory name") name_position; val liberal_name = keyword_with Token.ident_or_symbolic || name; val parname = Scan.optional ($$$ "(" |-- name --| $$$ ")") ""; val parbinding = Scan.optional ($$$ "(" |-- binding --| $$$ ")") Binding.empty; (* type classes *) val class = group (fn () => "type class") (inner_syntax embedded); val sort = group (fn () => "sort") (inner_syntax embedded); val type_const = group (fn () => "type constructor") (inner_syntax embedded); val arity = type_const -- ($$$ "::" |-- !!! (Scan.optional ($$$ "(" |-- !!! (list1 sort --| $$$ ")")) [] -- sort)) >> Scan.triple2; val multi_arity = and_list1 type_const -- ($$$ "::" |-- !!! (Scan.optional ($$$ "(" |-- !!! (list1 sort --| $$$ ")")) [] -- sort)) >> Scan.triple2; (* types *) val typ = group (fn () => "type") (inner_syntax embedded); fun type_arguments arg = arg >> single || $$$ "(" |-- !!! (list1 arg --| $$$ ")") || Scan.succeed []; val type_args = type_arguments type_ident; val type_args_constrained = type_arguments (type_ident -- Scan.option ($$$ "::" |-- !!! sort)); (* mixfix annotations *) local val mfix = input (string || cartouche); val mixfix_ = mfix -- !!! (Scan.optional ($$$ "[" |-- !!! (list nat --| $$$ "]")) [] -- Scan.optional nat 1000) >> (fn (sy, (ps, p)) => fn range => Mixfix (sy, ps, p, range)); val structure_ = $$$ "structure" >> K Structure; val binder_ = $$$ "binder" |-- !!! (mfix -- ($$$ "[" |-- nat --| $$$ "]" -- nat || nat >> (fn n => (n, n)))) >> (fn (sy, (p, q)) => fn range => Binder (sy, p, q, range)); val infixl_ = $$$ "infixl" |-- !!! (mfix -- nat >> (fn (sy, p) => fn range => Infixl (sy, p, range))); val infixr_ = $$$ "infixr" |-- !!! (mfix -- nat >> (fn (sy, p) => fn range => Infixr (sy, p, range))); val infix_ = $$$ "infix" |-- !!! 
(mfix -- nat >> (fn (sy, p) => fn range => Infix (sy, p, range))); val mixfix_body = mixfix_ || structure_ || binder_ || infixl_ || infixr_ || infix_; fun annotation guard body = Scan.trace ($$$ "(" |-- guard (body --| $$$ ")")) >> (fn (mx, toks) => mx (Token.range_of toks)); fun opt_annotation guard body = Scan.optional (annotation guard body) NoSyn; in val mixfix = annotation !!! mixfix_body; val mixfix' = annotation I mixfix_body; val opt_mixfix = opt_annotation !!! mixfix_body; val opt_mixfix' = opt_annotation I mixfix_body; end; (* syntax mode *) val syntax_mode_spec = ($$$ "output" >> K ("", false)) || name -- Scan.optional ($$$ "output" >> K false) true; val syntax_mode = Scan.optional ($$$ "(" |-- !!! (syntax_mode_spec --| $$$ ")")) Syntax.mode_default; (* fixes *) val where_ = $$$ "where"; val const_decl = name -- ($$$ "::" |-- !!! typ) -- opt_mixfix >> Scan.triple1; val const_binding = binding -- ($$$ "::" |-- !!! typ) -- opt_mixfix >> Scan.triple1; val param_mixfix = binding -- Scan.option ($$$ "::" |-- typ) -- mixfix' >> (single o Scan.triple1); val params = (binding -- Scan.repeat binding) -- Scan.option ($$$ "::" |-- !!! (Scan.ahead typ -- embedded)) >> (fn ((x, ys), T) => (x, Option.map #1 T, NoSyn) :: map (fn y => (y, Option.map #2 T, NoSyn)) ys); val vars = and_list1 (param_mixfix || params) >> flat; val for_fixes = Scan.optional ($$$ "for" |-- !!! vars) []; (* embedded source text *) -val ML_source = input (group (fn () => "ML source") text); -val document_source = input (group (fn () => "document source") text); +val ML_source = input (group (fn () => "ML source") embedded); +val document_source = input (group (fn () => "document source") embedded); val document_marker = group (fn () => "document marker") (RESET_VALUE (Scan.one Token.is_document_marker >> Token.input_of)); (* terms *) val const = group (fn () => "constant") (inner_syntax embedded); val term = group (fn () => "term") (inner_syntax embedded); val prop = group (fn () => "proposition") (inner_syntax embedded); val literal_fact = inner_syntax (group (fn () => "literal fact") (alt_string || cartouche)); (* patterns *) val is_terms = Scan.repeat1 ($$$ "is" |-- term); val is_props = Scan.repeat1 ($$$ "is" |-- prop); val propp = prop -- Scan.optional ($$$ "(" |-- !!! (is_props --| $$$ ")")) []; val termp = term -- Scan.optional ($$$ "(" |-- !!! (is_terms --| $$$ ")")) []; (* target information *) val private = position ($$$ "private") >> #2; val qualified = position ($$$ "qualified") >> #2; val target = ($$$ "(" -- $$$ "in") |-- !!! (name_position --| $$$ ")"); val opt_target = Scan.option target; (* arguments within outer syntax *) local val argument_kinds = [Token.Ident, Token.Long_Ident, Token.Sym_Ident, Token.Var, Token.Type_Ident, Token.Type_Var, - Token.Nat, Token.Float, Token.String, Token.Alt_String, Token.Cartouche, Token.Verbatim]; + Token.Nat, Token.Float, Token.String, Token.Alt_String, Token.Cartouche]; fun arguments is_symid = let fun argument blk = group (fn () => "argument") (Scan.one (fn tok => let val kind = Token.kind_of tok in member (op =) argument_kinds kind orelse Token.keyword_with is_symid tok orelse (blk andalso Token.keyword_with (fn s => s = ",") tok) end)); fun args blk x = Scan.optional (args1 blk) [] x and args1 blk x = (Scan.repeats1 (Scan.repeat1 (argument blk) || argsp "(" ")" || argsp "[" "]")) x and argsp l r x = (token ($$$ l) ::: !!! 
(args true @@@ (token ($$$ r) >> single))) x; in (args, args1) end; in val args = #1 (arguments Token.ident_or_symbolic) false; fun args1 is_symid = #2 (arguments is_symid) false; end; (* attributes *) val attrib = token liberal_name ::: !!! args; val attribs = $$$ "[" |-- list attrib --| $$$ "]"; val opt_attribs = Scan.optional attribs []; (* theorem references *) val thm_sel = $$$ "(" |-- list1 (nat --| minus -- nat >> Facts.FromTo || nat --| minus >> Facts.From || nat >> Facts.Single) --| $$$ ")"; val thm = $$$ "[" |-- attribs --| $$$ "]" >> pair (Facts.named "") || (literal_fact >> Facts.Fact || name_position -- Scan.option thm_sel >> Facts.Named) -- opt_attribs; val thms1 = Scan.repeat1 thm; (* options *) val option_name = group (fn () => "option name") name_position; val option_value = group (fn () => "option value") ((token real || token name) >> Token.content_of); val option = option_name :-- (fn (_, pos) => Scan.optional ($$$ "=" |-- !!! (position option_value)) ("true", pos)); val options = $$$ "[" |-- list1 option --| $$$ "]"; (* embedded ML *) val embedded_ml = + input underscore >> ML_Lex.read_source || embedded_input >> ML_Lex.read_source || control >> (ML_Lex.read_symbols o Antiquote.control_symbols); -val embedded_ml_underscore = - input underscore >> ML_Lex.read_source || embedded_ml; - (* read embedded source, e.g. for antiquotations *) fun tokenize keywords = Token.tokenize keywords {strict = true} #> filter Token.is_proper; fun read_antiq keywords scan (syms, pos) = (case Scan.read Token.stopper scan (tokenize (Keyword.no_major_keywords keywords) syms) of SOME res => res | NONE => error ("Malformed antiquotation" ^ Position.here pos)); fun read_embedded ctxt keywords parse input = let val toks = tokenize keywords (Input.source_explode input); val _ = Context_Position.reports_text ctxt (maps (Token.reports keywords) toks); in (case Scan.read Token.stopper parse toks of SOME res => res | NONE => error ("Bad input" ^ Position.here (Input.pos_of input))) end; fun read_embedded_src ctxt keywords parse src = Token.syntax (Scan.lift embedded_input) src ctxt |> #1 |> read_embedded ctxt keywords parse; end; diff --git a/src/Pure/Isar/parse.scala b/src/Pure/Isar/parse.scala --- a/src/Pure/Isar/parse.scala +++ b/src/Pure/Isar/parse.scala @@ -1,107 +1,107 @@ /* Title: Pure/Isar/parse.scala Author: Makarius Generic parsers for Isabelle/Isar outer syntax. 
*/ package isabelle import scala.util.parsing.combinator.Parsers import scala.annotation.tailrec object Parse { /* parsing tokens */ trait Parser extends Parsers { type Elem = Token def filter_proper: Boolean = true @tailrec private def proper(in: Input): Input = if (!filter_proper || in.atEnd || in.first.is_proper) in else proper(in.rest) private def proper_position: Parser[Position.T] = new Parser[Position.T] { def apply(raw_input: Input) = { val in = proper(raw_input) val pos = in.pos match { case pos: Token.Pos => pos case _ => Token.Pos.none } Success(if (in.atEnd) pos.position() else pos.position(in.first), in) } } def position[A](parser: Parser[A]): Parser[(A, Position.T)] = proper_position ~ parser ^^ { case x ~ y => (y, x) } def token(s: String, pred: Elem => Boolean): Parser[Elem] = new Parser[Elem] { def apply(raw_input: Input) = { val in = proper(raw_input) if (in.atEnd) Failure(s + " expected,\nbut end-of-input was found", in) else { val token = in.first if (pred(token)) Success(token, proper(in.rest)) else Failure(s + " expected,\nbut " + token.kind + " was found:\n" + token.source, in) } } } def atom(s: String, pred: Elem => Boolean): Parser[String] = token(s, pred) ^^ (_.content) def command(name: String): Parser[String] = atom("command " + quote(name), _.is_command(name)) def $$$(name: String): Parser[String] = atom("keyword " + quote(name), _.is_keyword(name)) def string: Parser[String] = atom("string", _.is_string) def nat: Parser[Int] = atom("natural number", _.is_nat) ^^ (s => Integer.parseInt(s)) def name: Parser[String] = atom("name", _.is_name) def embedded: Parser[String] = atom("embedded content", _.is_embedded) - def text: Parser[String] = atom("text", _.is_text) - def ML_source: Parser[String] = atom("ML source", _.is_text) - def document_source: Parser[String] = atom("document source", _.is_text) + def text: Parser[String] = atom("text", _.is_embedded) + def ML_source: Parser[String] = atom("ML source", _.is_embedded) + def document_source: Parser[String] = atom("document source", _.is_embedded) def opt_keyword(s: String): Parser[Boolean] = ($$$("(") ~! $$$(s) ~ $$$(")")) ^^ { case _ => true } | success(false) def path: Parser[String] = atom("file name/path specification", tok => tok.is_embedded && Path.is_wellformed(tok.content)) def session_name: Parser[String] = atom("session name", _.is_system_name) def theory_name: Parser[String] = atom("theory name", _.is_system_name) private def tag_name: Parser[String] = atom("tag name", tok => tok.kind == Token.Kind.IDENT || tok.kind == Token.Kind.STRING) def tag: Parser[String] = $$$("%") ~> tag_name def tags: Parser[List[String]] = rep(tag) def marker: Parser[String] = token("marker", _.is_marker) ^^ (_.content) def annotation: Parser[Unit] = rep(tag | marker) ^^ { case _ => () } /* wrappers */ def parse[T](p: Parser[T], in: Token.Reader): ParseResult[T] = p(in) def parse_all[T](p: Parser[T], in: Token.Reader): ParseResult[T] = { val result = parse(p, in) val rest = proper(result.next) if (result.successful && !rest.atEnd) Error("bad input", rest) else result } } } diff --git a/src/Pure/Isar/token.ML b/src/Pure/Isar/token.ML --- a/src/Pure/Isar/token.ML +++ b/src/Pure/Isar/token.ML @@ -1,824 +1,802 @@ (* Title: Pure/Isar/token.ML Author: Markus Wenzel, TU Muenchen Outer token syntax for Isabelle/Isar. 
*) signature TOKEN = sig datatype kind = (*immediate source*) Command | Keyword | Ident | Long_Ident | Sym_Ident | Var | Type_Ident | Type_Var | Nat | Float | Space | (*delimited content*) - String | Alt_String | Verbatim | Cartouche | + String | Alt_String | Cartouche | Control of Antiquote.control | Comment of Comment.kind option | (*special content*) Error of string | EOF val control_kind: kind val str_of_kind: kind -> string type file = {src_path: Path.T, lines: string list, digest: SHA1.digest, pos: Position.T} type T type src = T list type name_value = {name: string, kind: string, print: Proof.context -> Markup.T * xstring} datatype value = Source of src | Literal of bool * Markup.T | Name of name_value * morphism | Typ of typ | Term of term | Fact of string option * thm list | Attribute of morphism -> attribute | Declaration of declaration | Files of file Exn.result list | Output of XML.body option val pos_of: T -> Position.T val adjust_offsets: (int -> int option) -> T -> T val eof: T val is_eof: T -> bool val not_eof: T -> bool val stopper: T Scan.stopper val kind_of: T -> kind val is_kind: kind -> T -> bool val get_control: T -> Antiquote.control option val is_command: T -> bool val keyword_with: (string -> bool) -> T -> bool val is_command_modifier: T -> bool val ident_with: (string -> bool) -> T -> bool val is_proper: T -> bool val is_comment: T -> bool val is_informal_comment: T -> bool val is_formal_comment: T -> bool val is_document_marker: T -> bool val is_ignored: T -> bool val is_begin_ignore: T -> bool val is_end_ignore: T -> bool val is_error: T -> bool val is_space: T -> bool val is_blank: T -> bool val is_newline: T -> bool val range_of: T list -> Position.range val core_range_of: T list -> Position.range val content_of: T -> string val source_of: T -> string val input_of: T -> Input.source val inner_syntax_of: T -> string val keyword_markup: bool * Markup.T -> string -> Markup.T val completion_report: T -> Position.report_text list val reports: Keyword.keywords -> T -> Position.report_text list val markups: Keyword.keywords -> T -> Markup.T list val unparse: T -> string val print: T -> string val text_of: T -> string * string val file_source: file -> Input.source val get_files: T -> file Exn.result list val put_files: file Exn.result list -> T -> T val get_output: T -> XML.body option val put_output: XML.body -> T -> T val get_value: T -> value option val reports_of_value: T -> Position.report list val name_value: name_value -> value val get_name: T -> name_value option val declare_maxidx: T -> Proof.context -> Proof.context val map_facts: (string option -> thm list -> thm list) -> T -> T val trim_context_src: src -> src val transform: morphism -> T -> T val init_assignable: T -> T val assign: value option -> T -> T val evaluate: ('a -> value) -> (T -> 'a) -> T -> 'a val closure: T -> T val pretty_value: Proof.context -> T -> Pretty.T val name_of_src: src -> string * Position.T val args_of_src: src -> T list val checked_src: src -> bool val check_src: Proof.context -> (Proof.context -> 'a Name_Space.table) -> src -> src * 'a val pretty_src: Proof.context -> src -> Pretty.T val ident_or_symbolic: string -> bool val read_cartouche: Symbol_Pos.T list -> T val tokenize: Keyword.keywords -> {strict: bool} -> Symbol_Pos.T list -> T list val explode: Keyword.keywords -> Position.T -> string -> T list val explode0: Keyword.keywords -> string -> T list val print_name: Keyword.keywords -> string -> string val print_properties: Keyword.keywords -> Properties.T -> string val 
make: (int * int) * string -> Position.T -> T * Position.T val make_string: string * Position.T -> T val make_int: int -> T list val make_src: string * Position.T -> T list -> src type 'a parser = T list -> 'a * T list type 'a context_parser = Context.generic * T list -> 'a * (Context.generic * T list) val syntax_generic: 'a context_parser -> src -> Context.generic -> 'a * Context.generic val syntax: 'a context_parser -> src -> Proof.context -> 'a * Proof.context end; structure Token: TOKEN = struct (** tokens **) (* token kind *) datatype kind = (*immediate source*) Command | Keyword | Ident | Long_Ident | Sym_Ident | Var | Type_Ident | Type_Var | Nat | Float | Space | (*delimited content*) - String | Alt_String | Verbatim | Cartouche | + String | Alt_String | Cartouche | Control of Antiquote.control | Comment of Comment.kind option | (*special content*) Error of string | EOF; val control_kind = Control Antiquote.no_control; fun equiv_kind kind kind' = (case (kind, kind') of (Control _, Control _) => true | (Error _, Error _) => true | _ => kind = kind'); val str_of_kind = fn Command => "command" | Keyword => "keyword" | Ident => "identifier" | Long_Ident => "long identifier" | Sym_Ident => "symbolic identifier" | Var => "schematic variable" | Type_Ident => "type variable" | Type_Var => "schematic type variable" | Nat => "natural number" | Float => "floating-point number" | Space => "white space" | String => "quoted string" | Alt_String => "back-quoted string" - | Verbatim => "verbatim text" | Cartouche => "text cartouche" | Control _ => "control cartouche" | Comment NONE => "informal comment" | Comment (SOME _) => "formal comment" | Error _ => "bad input" | EOF => "end-of-input"; val immediate_kinds = Vector.fromList [Command, Keyword, Ident, Long_Ident, Sym_Ident, Var, Type_Ident, Type_Var, Nat, Float, Space]; val delimited_kind = (fn String => true | Alt_String => true - | Verbatim => true | Cartouche => true | Control _ => true | Comment _ => true | _ => false); (* datatype token *) (*The value slot assigns an (optional) internal value to a token, usually as a side-effect of special scanner setup (see also args.ML). Note that an assignable ref designates an intermediate state of internalization -- it is NOT meant to persist.*) type file = {src_path: Path.T, lines: string list, digest: SHA1.digest, pos: Position.T}; type name_value = {name: string, kind: string, print: Proof.context -> Markup.T * xstring}; datatype T = Token of (Symbol_Pos.text * Position.range) * (kind * string) * slot and slot = Slot | Value of value option | Assignable of value option Unsynchronized.ref and value = Source of T list | Literal of bool * Markup.T | Name of name_value * morphism | Typ of typ | Term of term | Fact of string option * thm list | (*optional name for dynamic fact, i.e. 
fact "variable"*) Attribute of morphism -> attribute | Declaration of declaration | Files of file Exn.result list | Output of XML.body option; type src = T list; (* position *) fun pos_of (Token ((_, (pos, _)), _, _)) = pos; fun end_pos_of (Token ((_, (_, pos)), _, _)) = pos; fun adjust_offsets adjust (Token ((x, range), y, z)) = Token ((x, apply2 (Position.adjust_offsets adjust) range), y, z); (* stopper *) fun mk_eof pos = Token (("", (pos, Position.none)), (EOF, ""), Slot); val eof = mk_eof Position.none; fun is_eof (Token (_, (EOF, _), _)) = true | is_eof _ = false; val not_eof = not o is_eof; val stopper = Scan.stopper (fn [] => eof | toks => mk_eof (end_pos_of (List.last toks))) is_eof; (* kind of token *) fun kind_of (Token (_, (k, _), _)) = k; fun is_kind k (Token (_, (k', _), _)) = equiv_kind k k'; fun get_control tok = (case kind_of tok of Control control => SOME control | _ => NONE); val is_command = is_kind Command; fun keyword_with pred (Token (_, (Keyword, x), _)) = pred x | keyword_with _ _ = false; val is_command_modifier = keyword_with (fn x => x = "private" orelse x = "qualified"); fun ident_with pred (Token (_, (Ident, x), _)) = pred x | ident_with _ _ = false; fun is_ignored (Token (_, (Space, _), _)) = true | is_ignored (Token (_, (Comment NONE, _), _)) = true | is_ignored _ = false; fun is_proper (Token (_, (Space, _), _)) = false | is_proper (Token (_, (Comment _, _), _)) = false | is_proper _ = true; fun is_comment (Token (_, (Comment _, _), _)) = true | is_comment _ = false; fun is_informal_comment (Token (_, (Comment NONE, _), _)) = true | is_informal_comment _ = false; fun is_formal_comment (Token (_, (Comment (SOME _), _), _)) = true | is_formal_comment _ = false; fun is_document_marker (Token (_, (Comment (SOME Comment.Marker), _), _)) = true | is_document_marker _ = false; fun is_begin_ignore (Token (_, (Comment NONE, "<"), _)) = true | is_begin_ignore _ = false; fun is_end_ignore (Token (_, (Comment NONE, ">"), _)) = true | is_end_ignore _ = false; fun is_error (Token (_, (Error _, _), _)) = true | is_error _ = false; (* blanks and newlines -- space tokens obey lines *) fun is_space (Token (_, (Space, _), _)) = true | is_space _ = false; fun is_blank (Token (_, (Space, x), _)) = not (String.isSuffix "\n" x) | is_blank _ = false; fun is_newline (Token (_, (Space, x), _)) = String.isSuffix "\n" x | is_newline _ = false; (* range of tokens *) fun range_of (toks as tok :: _) = let val pos' = end_pos_of (List.last toks) in Position.range (pos_of tok, pos') end | range_of [] = Position.no_range; val core_range_of = drop_prefix is_ignored #> drop_suffix is_ignored #> range_of; (* token content *) fun content_of (Token (_, (_, x), _)) = x; fun source_of (Token ((source, _), _, _)) = source; fun input_of (Token ((source, range), (kind, _), _)) = Input.source (delimited_kind kind) source range; fun inner_syntax_of tok = let val x = content_of tok in if YXML.detect x then x else Syntax.implode_input (input_of tok) end; (* markup reports *) local val token_kind_markup = fn Var => (Markup.var, "") | Type_Ident => (Markup.tfree, "") | Type_Var => (Markup.tvar, "") | String => (Markup.string, "") | Alt_String => (Markup.alt_string, "") - | Verbatim => (Markup.verbatim, "") | Cartouche => (Markup.cartouche, "") | Control _ => (Markup.cartouche, "") | Comment _ => (Markup.comment, "") | Error msg => (Markup.bad (), msg) | _ => (Markup.empty, ""); fun keyword_reports tok = map (fn markup => ((pos_of tok, markup), "")); fun command_markups keywords x = if Keyword.is_theory_end 
keywords x then [Markup.keyword2 |> Markup.keyword_properties] else (if Keyword.is_proof_asm keywords x then [Markup.keyword3] else if Keyword.is_improper keywords x then [Markup.keyword1, Markup.improper] else [Markup.keyword1]) |> map Markup.command_properties; in fun keyword_markup (important, keyword) x = if important orelse Symbol.is_ascii_identifier x then keyword else Markup.delimiter; fun completion_report tok = if is_kind Keyword tok then map (fn m => ((pos_of tok, m), "")) (Completion.suppress_abbrevs (content_of tok)) else []; fun reports keywords tok = if is_command tok then keyword_reports tok (command_markups keywords (content_of tok)) else if is_kind Keyword tok then keyword_reports tok [keyword_markup (false, Markup.keyword2 |> Markup.keyword_properties) (content_of tok)] else let val pos = pos_of tok; val (m, text) = token_kind_markup (kind_of tok); val deleted = Symbol_Pos.explode_deleted (source_of tok, pos); in ((pos, m), text) :: map (fn p => ((p, Markup.delete), "")) deleted end; fun markups keywords = map (#2 o #1) o reports keywords; end; (* unparse *) fun unparse (Token (_, (kind, x), _)) = (case kind of String => Symbol_Pos.quote_string_qq x | Alt_String => Symbol_Pos.quote_string_bq x - | Verbatim => enclose "{*" "*}" x | Cartouche => cartouche x | Control control => Symbol_Pos.content (Antiquote.control_symbols control) | Comment NONE => enclose "(*" "*)" x | EOF => "" | _ => x); fun print tok = Markup.markups (markups Keyword.empty_keywords tok) (unparse tok); fun text_of tok = let val k = str_of_kind (kind_of tok); val ms = markups Keyword.empty_keywords tok; val s = unparse tok; in if s = "" then (k, "") else if size s < 40 andalso not (exists_string (fn c => c = "\n") s) then (k ^ " " ^ Markup.markups ms s, "") else (k, Markup.markups ms s) end; (** associated values **) (* inlined file content *) fun file_source (file: file) = let val text = cat_lines (#lines file); val end_pos = Position.symbol_explode text (#pos file); in Input.source true text (Position.range (#pos file, end_pos)) end; fun get_files (Token (_, _, Value (SOME (Files files)))) = files | get_files _ = []; fun put_files [] tok = tok | put_files files (Token (x, y, Slot)) = Token (x, y, Value (SOME (Files files))) | put_files _ tok = raise Fail ("Cannot put inlined files here" ^ Position.here (pos_of tok)); (* document output *) fun get_output (Token (_, _, Value (SOME (Output output)))) = output | get_output _ = NONE; fun put_output output (Token (x, y, Slot)) = Token (x, y, Value (SOME (Output (SOME output)))) | put_output _ tok = raise Fail ("Cannot put document output here" ^ Position.here (pos_of tok)); (* access values *) fun get_value (Token (_, _, Value v)) = v | get_value _ = NONE; fun map_value f (Token (x, y, Value (SOME v))) = Token (x, y, Value (SOME (f v))) | map_value _ tok = tok; (* reports of value *) fun get_assignable_value (Token (_, _, Assignable r)) = ! 
r | get_assignable_value (Token (_, _, Value v)) = v | get_assignable_value _ = NONE; fun reports_of_value tok = (case get_assignable_value tok of SOME (Literal markup) => let val pos = pos_of tok; val x = content_of tok; in if Position.is_reported pos then map (pair pos) (keyword_markup markup x :: Completion.suppress_abbrevs x) else [] end | _ => []); (* name value *) fun name_value a = Name (a, Morphism.identity); fun get_name tok = (case get_assignable_value tok of SOME (Name (a, _)) => SOME a | _ => NONE); (* maxidx *) fun declare_maxidx tok = (case get_value tok of SOME (Source src) => fold declare_maxidx src | SOME (Typ T) => Variable.declare_maxidx (Term.maxidx_of_typ T) | SOME (Term t) => Variable.declare_maxidx (Term.maxidx_of_term t) | SOME (Fact (_, ths)) => fold (Variable.declare_maxidx o Thm.maxidx_of) ths | SOME (Attribute _) => I (* FIXME !? *) | SOME (Declaration decl) => (fn ctxt => let val ctxt' = Context.proof_map (Morphism.form decl) ctxt in Variable.declare_maxidx (Variable.maxidx_of ctxt') ctxt end) | _ => I); (* fact values *) fun map_facts f = map_value (fn v => (case v of Source src => Source (map (map_facts f) src) | Fact (a, ths) => Fact (a, f a ths) | _ => v)); val trim_context_src = (map o map_facts) (K (map Thm.trim_context)); (* transform *) fun transform phi = map_value (fn v => (case v of Source src => Source (map (transform phi) src) | Literal _ => v | Name (a, psi) => Name (a, psi $> phi) | Typ T => Typ (Morphism.typ phi T) | Term t => Term (Morphism.term phi t) | Fact (a, ths) => Fact (a, Morphism.fact phi ths) | Attribute att => Attribute (Morphism.transform phi att) | Declaration decl => Declaration (Morphism.transform phi decl) | Files _ => v | Output _ => v)); (* static binding *) (*1st stage: initialize assignable slots*) fun init_assignable tok = (case tok of Token (x, y, Slot) => Token (x, y, Assignable (Unsynchronized.ref NONE)) | Token (_, _, Value _) => tok | Token (_, _, Assignable r) => (r := NONE; tok)); (*2nd stage: assign values as side-effect of scanning*) fun assign v tok = (case tok of Token (x, y, Slot) => Token (x, y, Value v) | Token (_, _, Value _) => tok | Token (_, _, Assignable r) => (r := v; tok)); fun evaluate mk eval arg = let val x = eval arg in (assign (SOME (mk x)) arg; x) end; (*3rd stage: static closure of final values*) fun closure (Token (x, y, Assignable (Unsynchronized.ref v))) = Token (x, y, Value v) | closure tok = tok; (* pretty *) fun pretty_value ctxt tok = (case get_value tok of SOME (Literal markup) => let val x = content_of tok in Pretty.mark_str (keyword_markup markup x, x) end | SOME (Name ({print, ...}, _)) => Pretty.quote (Pretty.mark_str (print ctxt)) | SOME (Typ T) => Syntax.pretty_typ ctxt T | SOME (Term t) => Syntax.pretty_term ctxt t | SOME (Fact (_, ths)) => Pretty.enclose "(" ")" (Pretty.breaks (map (Pretty.cartouche o Thm.pretty_thm ctxt) ths)) | _ => Pretty.marks_str (markups Keyword.empty_keywords tok, unparse tok)); (* src *) fun dest_src ([]: src) = raise Fail "Empty token source" | dest_src (head :: args) = (head, args); fun name_of_src src = let val head = #1 (dest_src src); val name = (case get_name head of SOME {name, ...} => name | NONE => content_of head); in (name, pos_of head) end; val args_of_src = #2 o dest_src; fun pretty_src ctxt src = let val (head, args) = dest_src src; val prt_name = (case get_name head of SOME {print, ...} => Pretty.mark_str (print ctxt) | NONE => Pretty.str (content_of head)); in Pretty.block (Pretty.breaks (Pretty.quote prt_name :: map (pretty_value ctxt) 
args)) end; fun checked_src (head :: _) = is_some (get_name head) | checked_src [] = true; fun check_src ctxt get_table src = let val (head, args) = dest_src src; val table = get_table ctxt; in (case get_name head of SOME {name, ...} => (src, Name_Space.get table name) | NONE => let val pos = pos_of head; val (name, x) = Name_Space.check (Context.Proof ctxt) table (content_of head, pos); val _ = Context_Position.report ctxt pos Markup.operator; val kind = Name_Space.kind_of (Name_Space.space_of_table table); fun print ctxt' = Name_Space.markup_extern ctxt' (Name_Space.space_of_table (get_table ctxt')) name; val value = name_value {name = name, kind = kind, print = print}; val head' = closure (assign (SOME value) head); in (head' :: args, x) end) end; (** scanners **) open Basic_Symbol_Pos; val err_prefix = "Outer lexical error: "; fun !!! msg = Symbol_Pos.!!! (fn () => err_prefix ^ msg); (* scan symbolic idents *) val scan_symid = Scan.many1 (Symbol.is_symbolic_char o Symbol_Pos.symbol) || Scan.one (Symbol.is_symbolic o Symbol_Pos.symbol) >> single; fun is_symid str = (case try Symbol.explode str of SOME [s] => Symbol.is_symbolic s orelse Symbol.is_symbolic_char s | SOME ss => forall Symbol.is_symbolic_char ss | _ => false); fun ident_or_symbolic "begin" = false | ident_or_symbolic ":" = true | ident_or_symbolic "::" = true | ident_or_symbolic s = Symbol_Pos.is_identifier s orelse is_symid s; -(* scan verbatim text *) - -val scan_verb = - $$$ "*" --| Scan.ahead (~$$ "}") || - Scan.one (fn (s, _) => s <> "*" andalso Symbol.not_eof s) >> single; - -val scan_verbatim = - Scan.ahead ($$ "{" -- $$ "*") |-- - !!! "unclosed verbatim text" - ((Symbol_Pos.scan_pos --| $$ "{" --| $$ "*") -- - (Scan.repeats scan_verb -- ($$ "*" |-- $$ "}" |-- Symbol_Pos.scan_pos))); - -val recover_verbatim = - $$$ "{" @@@ $$$ "*" @@@ Scan.repeats scan_verb; - - (* scan cartouche *) val scan_cartouche = Symbol_Pos.scan_pos -- ((Symbol_Pos.scan_cartouche err_prefix >> Symbol_Pos.cartouche_content) -- Symbol_Pos.scan_pos); (* scan space *) fun space_symbol (s, _) = Symbol.is_blank s andalso s <> "\n"; val scan_space = Scan.many1 space_symbol @@@ Scan.optional ($$$ "\n") [] || Scan.many space_symbol @@@ $$$ "\n"; (* scan comment *) val scan_comment = Symbol_Pos.scan_pos -- (Symbol_Pos.scan_comment_body err_prefix -- Symbol_Pos.scan_pos); (** token sources **) local fun token_leq ((_, syms1), (_, syms2)) = length syms1 <= length syms2; fun token k ss = Token ((Symbol_Pos.implode ss, Symbol_Pos.range ss), (k, Symbol_Pos.content ss), Slot); fun token_range k (pos1, (ss, pos2)) = Token (Symbol_Pos.implode_range (pos1, pos2) ss, (k, Symbol_Pos.content ss), Slot); fun scan_token keywords = !!! 
"bad input" (Symbol_Pos.scan_string_qq err_prefix >> token_range String || Symbol_Pos.scan_string_bq err_prefix >> token_range Alt_String || - scan_verbatim >> token_range Verbatim || scan_comment >> token_range (Comment NONE) || Comment.scan_outer >> (fn (k, ss) => token (Comment (SOME k)) ss) || scan_cartouche >> token_range Cartouche || Antiquote.scan_control err_prefix >> (fn control => token (Control control) (Antiquote.control_symbols control)) || scan_space >> token Space || (Scan.max token_leq (Scan.max token_leq (Scan.literal (Keyword.major_keywords keywords) >> pair Command) (Scan.literal (Keyword.minor_keywords keywords) >> pair Keyword)) (Lexicon.scan_longid >> pair Long_Ident || Lexicon.scan_id >> pair Ident || Lexicon.scan_var >> pair Var || Lexicon.scan_tid >> pair Type_Ident || Lexicon.scan_tvar >> pair Type_Var || Symbol_Pos.scan_float >> pair Float || Symbol_Pos.scan_nat >> pair Nat || scan_symid >> pair Sym_Ident) >> uncurry token)); fun recover msg = (Symbol_Pos.recover_string_qq || Symbol_Pos.recover_string_bq || - recover_verbatim || Symbol_Pos.recover_cartouche || Symbol_Pos.recover_comment || Scan.one (Symbol.not_eof o Symbol_Pos.symbol) >> single) >> (single o token (Error msg)); in fun make_source keywords {strict} = let val scan_strict = Scan.bulk (scan_token keywords); val scan = if strict then scan_strict else Scan.recover scan_strict recover; in Source.source Symbol_Pos.stopper scan end; fun read_cartouche syms = (case Scan.read Symbol_Pos.stopper (scan_cartouche >> token_range Cartouche) syms of SOME tok => tok | NONE => error ("Single cartouche expected" ^ Position.here (#1 (Symbol_Pos.range syms)))); end; (* explode *) fun tokenize keywords strict syms = Source.of_list syms |> make_source keywords strict |> Source.exhaust; fun explode keywords pos text = Symbol_Pos.explode (text, pos) |> tokenize keywords {strict = false}; fun explode0 keywords = explode keywords Position.none; (* print names in parsable form *) fun print_name keywords name = ((case explode keywords Position.none name of [tok] => not (member (op =) [Ident, Long_Ident, Sym_Ident, Nat] (kind_of tok)) | _ => true) ? 
Symbol_Pos.quote_string_qq) name; fun print_properties keywords = map (apply2 (print_name keywords) #> (fn (a, b) => a ^ " = " ^ b)) #> commas #> enclose "[" "]"; (* make *) fun make ((k, n), s) pos = let val pos' = Position.advance_offsets n pos; val range = Position.range (pos, pos'); val tok = if 0 <= k andalso k < Vector.length immediate_kinds then Token ((s, range), (Vector.sub (immediate_kinds, k), s), Slot) else (case explode Keyword.empty_keywords pos s of [tok] => tok | _ => Token ((s, range), (Error (err_prefix ^ "exactly one token expected"), s), Slot)) in (tok, pos') end; fun make_string (s, pos) = let val Token ((x, _), y, z) = #1 (make ((~1, 0), Symbol_Pos.quote_string_qq s) Position.none); val pos' = Position.no_range_position pos; in Token ((x, (pos', pos')), y, z) end; val make_int = explode Keyword.empty_keywords Position.none o signed_string_of_int; fun make_src a args = make_string a :: args; (** parsers **) type 'a parser = T list -> 'a * T list; type 'a context_parser = Context.generic * T list -> 'a * (Context.generic * T list); (* wrapped syntax *) fun syntax_generic scan src context = let val (name, pos) = name_of_src src; val old_reports = maps reports_of_value src; val args1 = map init_assignable (args_of_src src); fun reported_text () = if Context_Position.reports_enabled_generic context then let val new_reports = maps (reports_of_value o closure) args1 in if old_reports <> new_reports then map (fn (p, m) => Position.reported_text p m "") new_reports else [] end else []; in (case Scan.error (Scan.finite' stopper (Scan.option scan)) (context, args1) of (SOME x, (context', [])) => let val _ = Output.report (reported_text ()) in (x, context') end | (_, (context', args2)) => let val print_name = (case get_name (hd src) of NONE => quote name | SOME {kind, print, ...} => let val ctxt' = Context.proof_of context'; val (markup, xname) = print ctxt'; in plain_words kind ^ " " ^ quote (Markup.markup markup xname) end); val print_args = if null args2 then "" else ":\n " ^ space_implode " " (map print args2); in error ("Bad arguments for " ^ print_name ^ Position.here pos ^ print_args ^ Markup.markup_report (implode (reported_text ()))) end) end; fun syntax scan src = apsnd Context.the_proof o syntax_generic scan src o Context.Proof; end; type 'a parser = 'a Token.parser; type 'a context_parser = 'a Token.context_parser; diff --git a/src/Pure/Isar/token.scala b/src/Pure/Isar/token.scala --- a/src/Pure/Isar/token.scala +++ b/src/Pure/Isar/token.scala @@ -1,335 +1,328 @@ /* Title: Pure/Isar/token.scala Author: Makarius Outer token syntax for Isabelle/Isar. 
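Note: this Scala implementation mirrors the scanner of Pure/Isar/token.ML for
editor and prover-IDE support; tokens here carry only kind and source text,
without the assignable semantic values of the ML version.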
*/ package isabelle import scala.collection.mutable import scala.util.parsing.input object Token { /* tokens */ object Kind extends Enumeration { /*immediate source*/ val COMMAND = Value("command") val KEYWORD = Value("keyword") val IDENT = Value("identifier") val LONG_IDENT = Value("long identifier") val SYM_IDENT = Value("symbolic identifier") val VAR = Value("schematic variable") val TYPE_IDENT = Value("type variable") val TYPE_VAR = Value("schematic type variable") val NAT = Value("natural number") val FLOAT = Value("floating-point number") val SPACE = Value("white space") /*delimited content*/ val STRING = Value("string") val ALT_STRING = Value("back-quoted string") - val VERBATIM = Value("verbatim text") val CARTOUCHE = Value("text cartouche") val CONTROL = Value("control cartouche") val INFORMAL_COMMENT = Value("informal comment") val FORMAL_COMMENT = Value("formal comment") /*special content*/ val ERROR = Value("bad input") val UNPARSED = Value("unparsed input") } /* parsers */ object Parsers extends Parsers trait Parsers extends Scan.Parsers with Comment.Parsers { private def delimited_token: Parser[Token] = { val string = quoted("\"") ^^ (x => Token(Token.Kind.STRING, x)) val alt_string = quoted("`") ^^ (x => Token(Token.Kind.ALT_STRING, x)) - val verb = verbatim ^^ (x => Token(Token.Kind.VERBATIM, x)) val cmt = comment ^^ (x => Token(Token.Kind.INFORMAL_COMMENT, x)) val formal_cmt = comment_cartouche ^^ (x => Token(Token.Kind.FORMAL_COMMENT, x)) val cart = cartouche ^^ (x => Token(Token.Kind.CARTOUCHE, x)) val ctrl = control_cartouche ^^ (x => Token(Token.Kind.CONTROL, x)) - string | (alt_string | (verb | (cmt | (formal_cmt | (cart | ctrl))))) + string | (alt_string | (cmt | (formal_cmt | (cart | ctrl)))) } private def other_token(keywords: Keyword.Keywords): Parser[Token] = { val letdigs1 = many1(Symbol.is_letdig) val sub = one(s => s == Symbol.sub_decoded || s == Symbol.sub) val id = one(Symbol.is_letter) ~ (rep(letdigs1 | (sub ~ letdigs1 ^^ { case x ~ y => x + y })) ^^ (_.mkString)) ^^ { case x ~ y => x + y } val nat = many1(Symbol.is_digit) val natdot = nat ~ "." ~ nat ^^ { case x ~ y ~ z => x + y + z } val id_nat = id ~ opt("." ~ nat) ^^ { case x ~ Some(y ~ z) => x + y + z case x ~ None => x } val ident = id ~ rep("." ~> id) ^^ { case x ~ Nil => Token(Token.Kind.IDENT, x) case x ~ ys => Token(Token.Kind.LONG_IDENT, (x :: ys).mkString(".")) } val var_ = "?" 
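/* Summary of the remaining delimited forms recognized by delimited_token
   above (illustrative):

     "text"      STRING
     `text`      ALT_STRING
     (*text*)    INFORMAL_COMMENT
     cartouches  CARTOUCHE / CONTROL / FORMAL_COMMENT variants
*/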
~ id_nat ^^ { case x ~ y => Token(Token.Kind.VAR, x + y) } val type_ident = "'" ~ id ^^ { case x ~ y => Token(Token.Kind.TYPE_IDENT, x + y) } val type_var = "?'" ~ id_nat ^^ { case x ~ y => Token(Token.Kind.TYPE_VAR, x + y) } val nat_ = nat ^^ (x => Token(Token.Kind.NAT, x)) val float = ("-" ~ natdot ^^ { case x ~ y => x + y } | natdot) ^^ (x => Token(Token.Kind.FLOAT, x)) val sym_ident = (many1(Symbol.is_symbolic_char) | one(sym => Symbol.is_symbolic(sym))) ^^ (x => Token(Token.Kind.SYM_IDENT, x)) val keyword = literal(keywords.minor) ^^ (x => Token(Token.Kind.KEYWORD, x)) ||| literal(keywords.major) ^^ (x => Token(Token.Kind.COMMAND, x)) val space = many1(Symbol.is_blank) ^^ (x => Token(Token.Kind.SPACE, x)) val recover_delimited = (recover_quoted("\"") | (recover_quoted("`") | - (recover_verbatim | - (recover_cartouche | recover_comment)))) ^^ (x => Token(Token.Kind.ERROR, x)) + (recover_cartouche | recover_comment))) ^^ (x => Token(Token.Kind.ERROR, x)) val bad = one(_ => true) ^^ (x => Token(Token.Kind.ERROR, x)) space | (recover_delimited | (((ident | (var_ | (type_ident | (type_var | (float | (nat_ | sym_ident)))))) ||| keyword) | bad)) } def token(keywords: Keyword.Keywords): Parser[Token] = delimited_token | other_token(keywords) def token_line(keywords: Keyword.Keywords, ctxt: Scan.Line_Context) : Parser[(Token, Scan.Line_Context)] = { val string = quoted_line("\"", ctxt) ^^ { case (x, c) => (Token(Token.Kind.STRING, x), c) } val alt_string = quoted_line("`", ctxt) ^^ { case (x, c) => (Token(Token.Kind.ALT_STRING, x), c) } - val verb = verbatim_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.VERBATIM, x), c) } val cart = cartouche_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.CARTOUCHE, x), c) } val cmt = comment_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.INFORMAL_COMMENT, x), c) } val formal_cmt = comment_cartouche_line(ctxt) ^^ { case (x, c) => (Token(Token.Kind.FORMAL_COMMENT, x), c) } val other = other_token(keywords) ^^ { case x => (x, Scan.Finished) } - string | (alt_string | (verb | (cart | (cmt | (formal_cmt | other))))) + string | (alt_string | (cart | (cmt | (formal_cmt | other)))) } } /* explode */ def explode(keywords: Keyword.Keywords, inp: CharSequence): List[Token] = Parsers.parseAll(Parsers.rep(Parsers.token(keywords)), Scan.char_reader(inp)) match { case Parsers.Success(tokens, _) => tokens case _ => error("Unexpected failure of tokenizing input:\n" + inp.toString) } def explode_line(keywords: Keyword.Keywords, inp: CharSequence, context: Scan.Line_Context) : (List[Token], Scan.Line_Context) = { var in: input.Reader[Char] = Scan.char_reader(inp) val toks = new mutable.ListBuffer[Token] var ctxt = context while (!in.atEnd) { Parsers.parse(Parsers.token_line(keywords, ctxt), in) match { case Parsers.Success((x, c), rest) => toks += x; ctxt = c; in = rest case Parsers.NoSuccess(_, rest) => error("Unexpected failure of tokenizing input:\n" + rest.source.toString) } } (toks.toList, ctxt) } val newline: Token = explode(Keyword.Keywords.empty, "\n").head /* embedded */ def read_embedded(keywords: Keyword.Keywords, inp: CharSequence): Option[Token] = explode(keywords, inp) match { case List(tok) if tok.is_embedded => Some(tok) case _ => None } /* names */ def read_name(keywords: Keyword.Keywords, inp: CharSequence): Option[Token] = explode(keywords, inp) match { case List(tok) if tok.is_name => Some(tok) case _ => None } def quote_name(keywords: Keyword.Keywords, name: String): String = if (read_name(keywords, name).isDefined) name else quote(name.replace("\"", 
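/* quote_name sketch (illustrative values): names that already form a single
   name token pass through unchanged, everything else is quoted with escaped
   double quotes:

     quote_name(keywords, "foo")        == "foo"
     quote_name(keywords, "two words")  == "\"two words\""
*/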
"\\\"")) /* plain antiquotation (0 or 1 args) */ def read_antiq_arg(keywords: Keyword.Keywords, inp: CharSequence): Option[(String, Option[String])] = explode(keywords, inp).filter(_.is_proper) match { case List(t) if t.is_name => Some(t.content, None) case List(t1, t2) if t1.is_name && t2.is_embedded => Some(t1.content, Some(t2.content)) case _ => None } /* implode */ def implode(toks: List[Token]): String = toks match { case List(tok) => tok.source case _ => toks.map(_.source).mkString } /* token reader */ object Pos { val none: Pos = new Pos(0, 0, "", "") val start: Pos = new Pos(1, 1, "", "") def file(file: String): Pos = new Pos(1, 1, file, "") def id(id: String): Pos = new Pos(0, 1, "", id) val command: Pos = id(Markup.COMMAND) } final class Pos private[Token]( val line: Int, val offset: Symbol.Offset, val file: String, val id: String) extends input.Position { def column = 0 def lineContents = "" def advance(token: Token): Pos = advance(token.source) def advance(source: String): Pos = { var line1 = line var offset1 = offset for (s <- Symbol.iterator(source)) { if (line1 > 0 && Symbol.is_newline(s)) line1 += 1 if (offset1 > 0) offset1 += 1 } if (line1 == line && offset1 == offset) this else new Pos(line1, offset1, file, id) } private def position(end_offset: Symbol.Offset): Position.T = (if (line > 0) Position.Line(line) else Nil) ::: (if (offset > 0) Position.Offset(offset) else Nil) ::: (if (end_offset > 0) Position.End_Offset(end_offset) else Nil) ::: (if (file != "") Position.File(file) else Nil) ::: (if (id != "") Position.Id_String(id) else Nil) def position(): Position.T = position(0) def position(token: Token): Position.T = position(advance(token).offset) def position(source: String): Position.T = position(advance(source).offset) override def toString: String = Position.here(position(), delimited = false) } abstract class Reader extends input.Reader[Token] private class Token_Reader(tokens: List[Token], val pos: Pos) extends Reader { def first: Token = tokens.head def rest: Token_Reader = new Token_Reader(tokens.tail, pos.advance(first)) def atEnd: Boolean = tokens.isEmpty } def reader(tokens: List[Token], start: Token.Pos): Reader = new Token_Reader(tokens, start) } sealed case class Token(kind: Token.Kind.Value, source: String) { def is_command: Boolean = kind == Token.Kind.COMMAND def is_command(name: String): Boolean = kind == Token.Kind.COMMAND && source == name def is_keyword: Boolean = kind == Token.Kind.KEYWORD def is_keyword(name: String): Boolean = kind == Token.Kind.KEYWORD && source == name def is_keyword(name: Char): Boolean = kind == Token.Kind.KEYWORD && source.length == 1 && source(0) == name def is_delimiter: Boolean = is_keyword && !Symbol.is_ascii_identifier(source) def is_ident: Boolean = kind == Token.Kind.IDENT def is_sym_ident: Boolean = kind == Token.Kind.SYM_IDENT def is_string: Boolean = kind == Token.Kind.STRING def is_nat: Boolean = kind == Token.Kind.NAT def is_float: Boolean = kind == Token.Kind.FLOAT def is_name: Boolean = kind == Token.Kind.IDENT || kind == Token.Kind.LONG_IDENT || kind == Token.Kind.SYM_IDENT || kind == Token.Kind.STRING || kind == Token.Kind.NAT def is_embedded: Boolean = is_name || kind == Token.Kind.CARTOUCHE || kind == Token.Kind.VAR || kind == Token.Kind.TYPE_IDENT || kind == Token.Kind.TYPE_VAR - def is_text: Boolean = is_embedded || kind == Token.Kind.VERBATIM def is_space: Boolean = kind == Token.Kind.SPACE def is_informal_comment: Boolean = kind == Token.Kind.INFORMAL_COMMENT def is_formal_comment: Boolean = kind == 
Token.Kind.FORMAL_COMMENT def is_marker: Boolean = kind == Token.Kind.FORMAL_COMMENT && (source.startsWith(Symbol.marker) || source.startsWith(Symbol.marker_decoded)) def is_comment: Boolean = is_informal_comment || is_formal_comment def is_ignored: Boolean = is_space || is_informal_comment def is_proper: Boolean = !is_space && !is_comment def is_error: Boolean = kind == Token.Kind.ERROR def is_unparsed: Boolean = kind == Token.Kind.UNPARSED def is_unfinished: Boolean = is_error && (source.startsWith("\"") || source.startsWith("`") || - source.startsWith("{*") || source.startsWith("(*") || source.startsWith(Symbol.open) || source.startsWith(Symbol.open_decoded)) def is_open_bracket: Boolean = is_keyword && Word.open_brackets.exists(is_keyword) def is_close_bracket: Boolean = is_keyword && Word.close_brackets.exists(is_keyword) def is_begin: Boolean = is_keyword("begin") def is_end: Boolean = is_command("end") def is_begin_or_command: Boolean = is_begin || is_command def symbol_length: Symbol.Offset = Symbol.iterator(source).length def content: String = if (kind == Token.Kind.STRING) Scan.Parsers.quoted_content("\"", source) else if (kind == Token.Kind.ALT_STRING) Scan.Parsers.quoted_content("`", source) - else if (kind == Token.Kind.VERBATIM) Scan.Parsers.verbatim_content(source) else if (kind == Token.Kind.CARTOUCHE) Scan.Parsers.cartouche_content(source) else if (kind == Token.Kind.INFORMAL_COMMENT) Scan.Parsers.comment_content(source) else if (kind == Token.Kind.FORMAL_COMMENT) Comment.content(source) else source def is_system_name: Boolean = { val s = content is_name && Path.is_wellformed(s) && !s.exists(Symbol.is_ascii_blank) && !Path.is_reserved(s) } } diff --git a/src/Pure/ML/ml_antiquotations.ML b/src/Pure/ML/ml_antiquotations.ML --- a/src/Pure/ML/ml_antiquotations.ML +++ b/src/Pure/ML/ml_antiquotations.ML @@ -1,447 +1,447 @@ (* Title: Pure/ML/ml_antiquotations.ML Author: Makarius Miscellaneous ML antiquotations. *) signature ML_ANTIQUOTATIONS = sig val make_judgment: Proof.context -> term -> term val dest_judgment: Proof.context -> term -> term end; structure ML_Antiquotations: ML_ANTIQUOTATIONS = struct (* ML support *) val _ = Theory.setup (ML_Antiquotation.inline \<^binding>\undefined\ (Scan.succeed "(raise General.Match)") #> ML_Antiquotation.inline \<^binding>\assert\ (Scan.succeed "(fn b => if b then () else raise General.Fail \"Assertion failed\")") #> ML_Antiquotation.declaration_embedded \<^binding>\print\ (Scan.lift (Scan.optional Parse.embedded "Output.writeln")) (fn src => fn output => fn ctxt => let val struct_name = ML_Context.struct_name ctxt; val (_, pos) = Token.name_of_src src; val (a, ctxt') = ML_Context.variant "output" ctxt; val env = "val " ^ a ^ ": string -> unit =\n\ \ (" ^ output ^ ") o (fn s => s ^ Position.here (" ^ ML_Syntax.print_position pos ^ "));\n"; val body = "(fn x => (" ^ struct_name ^ "." 
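(*Usage sketch for the "print" antiquotation defined above (hypothetical ML
  source, not part of this change):

    val y = \<^print> (1 + 2);

  prints the value via the given output function (Output.writeln by default),
  annotated with the source position, and returns it unchanged.*)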
^ a ^ " (" ^ ML_Pretty.make_string_fn ^ " x); x))"; in (K (env, body), ctxt') end) #> ML_Antiquotation.value \<^binding>\rat\ (Scan.lift (Scan.optional (Args.$$$ "~" >> K ~1) 1 -- Parse.nat -- Scan.optional (Args.$$$ "/" |-- Parse.nat) 1) >> (fn ((sign, a), b) => "Rat.make " ^ ML_Syntax.print_pair ML_Syntax.print_int ML_Syntax.print_int (sign * a, b))) #> ML_Antiquotation.conditional \<^binding>\if_linux\ (fn _ => ML_System.platform_is_linux) #> ML_Antiquotation.conditional \<^binding>\if_macos\ (fn _ => ML_System.platform_is_macos) #> ML_Antiquotation.conditional \<^binding>\if_windows\ (fn _ => ML_System.platform_is_windows) #> ML_Antiquotation.conditional \<^binding>\if_unix\ (fn _ => ML_System.platform_is_unix)); (* formal entities *) val _ = Theory.setup (ML_Antiquotation.value_embedded \<^binding>\system_option\ (Args.context -- Scan.lift Parse.embedded_position >> (fn (ctxt, (name, pos)) => (Completion.check_option (Options.default ()) ctxt (name, pos) |> ML_Syntax.print_string))) #> ML_Antiquotation.value_embedded \<^binding>\theory\ (Args.context -- Scan.lift Parse.embedded_position >> (fn (ctxt, (name, pos)) => (Theory.check {long = false} ctxt (name, pos); "Context.get_theory {long = false} (Proof_Context.theory_of ML_context) " ^ ML_Syntax.print_string name)) || Scan.succeed "Proof_Context.theory_of ML_context") #> ML_Antiquotation.value_embedded \<^binding>\theory_context\ (Args.context -- Scan.lift Parse.embedded_position >> (fn (ctxt, (name, pos)) => (Theory.check {long = false} ctxt (name, pos); "Proof_Context.get_global (Proof_Context.theory_of ML_context) " ^ ML_Syntax.print_string name))) #> ML_Antiquotation.inline \<^binding>\context\ (Args.context >> (fn ctxt => ML_Context.struct_name ctxt ^ ".ML_context")) #> ML_Antiquotation.inline_embedded \<^binding>\typ\ (Args.typ >> (ML_Syntax.atomic o ML_Syntax.print_typ)) #> ML_Antiquotation.inline_embedded \<^binding>\term\ (Args.term >> (ML_Syntax.atomic o ML_Syntax.print_term)) #> ML_Antiquotation.inline_embedded \<^binding>\prop\ (Args.prop >> (ML_Syntax.atomic o ML_Syntax.print_term)) #> ML_Antiquotation.value_embedded \<^binding>\ctyp\ (Args.typ >> (fn T => "Thm.ctyp_of ML_context " ^ ML_Syntax.atomic (ML_Syntax.print_typ T))) #> ML_Antiquotation.value_embedded \<^binding>\cterm\ (Args.term >> (fn t => "Thm.cterm_of ML_context " ^ ML_Syntax.atomic (ML_Syntax.print_term t))) #> ML_Antiquotation.value_embedded \<^binding>\cprop\ (Args.prop >> (fn t => "Thm.cterm_of ML_context " ^ ML_Syntax.atomic (ML_Syntax.print_term t))) #> ML_Antiquotation.inline_embedded \<^binding>\oracle_name\ (Args.context -- Scan.lift Parse.embedded_position >> (fn (ctxt, (name, pos)) => ML_Syntax.print_string (Thm.check_oracle ctxt (name, pos))))); (* type classes *) fun class syn = Args.context -- Scan.lift Parse.embedded_inner_syntax >> (fn (ctxt, s) => Proof_Context.read_class ctxt s |> syn ? 
Lexicon.mark_class |> ML_Syntax.print_string); val _ = Theory.setup (ML_Antiquotation.inline_embedded \<^binding>\class\ (class false) #> ML_Antiquotation.inline_embedded \<^binding>\class_syntax\ (class true) #> ML_Antiquotation.inline_embedded \<^binding>\sort\ (Args.context -- Scan.lift Parse.embedded_inner_syntax >> (fn (ctxt, s) => ML_Syntax.atomic (ML_Syntax.print_sort (Syntax.read_sort ctxt s))))); (* type constructors *) fun type_name kind check = Args.context -- Scan.lift (Parse.token Parse.embedded) >> (fn (ctxt, tok) => let val s = Token.inner_syntax_of tok; val (_, pos) = Input.source_content (Token.input_of tok); val Type (c, _) = Proof_Context.read_type_name {proper = true, strict = false} ctxt s; val decl = Type.the_decl (Proof_Context.tsig_of ctxt) (c, pos); val res = (case try check (c, decl) of SOME res => res | NONE => error ("Not a " ^ kind ^ ": " ^ quote c ^ Position.here pos)); in ML_Syntax.print_string res end); val _ = Theory.setup (ML_Antiquotation.inline_embedded \<^binding>\type_name\ (type_name "logical type" (fn (c, Type.LogicalType _) => c)) #> ML_Antiquotation.inline_embedded \<^binding>\type_abbrev\ (type_name "type abbreviation" (fn (c, Type.Abbreviation _) => c)) #> ML_Antiquotation.inline_embedded \<^binding>\nonterminal\ (type_name "nonterminal" (fn (c, Type.Nonterminal) => c)) #> ML_Antiquotation.inline_embedded \<^binding>\type_syntax\ (type_name "type" (fn (c, _) => Lexicon.mark_type c))); (* constants *) fun const_name check = Args.context -- Scan.lift (Parse.token Parse.embedded) >> (fn (ctxt, tok) => let val s = Token.inner_syntax_of tok; val (_, pos) = Input.source_content (Token.input_of tok); val Const (c, _) = Proof_Context.read_const {proper = true, strict = false} ctxt s; val res = check (Proof_Context.consts_of ctxt, c) handle TYPE (msg, _, _) => error (msg ^ Position.here pos); in ML_Syntax.print_string res end); val _ = Theory.setup (ML_Antiquotation.inline_embedded \<^binding>\const_name\ (const_name (fn (consts, c) => (Consts.the_const consts c; c))) #> ML_Antiquotation.inline_embedded \<^binding>\const_abbrev\ (const_name (fn (consts, c) => (Consts.the_abbreviation consts c; c))) #> ML_Antiquotation.inline_embedded \<^binding>\const_syntax\ (const_name (fn (_, c) => Lexicon.mark_const c)) #> ML_Antiquotation.inline_embedded \<^binding>\syntax_const\ (Args.context -- Scan.lift Parse.embedded_position >> (fn (ctxt, arg) => ML_Syntax.print_string (Proof_Context.check_syntax_const ctxt arg))) #> ML_Antiquotation.inline_embedded \<^binding>\const\ (Args.context -- Scan.lift (Parse.position Parse.embedded_inner_syntax) -- Scan.optional (Scan.lift (Args.$$$ "(") |-- Parse.enum1' "," Args.typ --| Scan.lift (Args.$$$ ")")) [] >> (fn ((ctxt, (raw_c, pos)), Ts) => let val Const (c, _) = Proof_Context.read_const {proper = true, strict = true} ctxt raw_c; val consts = Proof_Context.consts_of ctxt; val n = length (Consts.typargs consts (c, Consts.type_scheme consts c)); val _ = length Ts <> n andalso error ("Constant requires " ^ string_of_int n ^ " type argument(s): " ^ quote c ^ enclose "(" ")" (commas (replicate n "_")) ^ Position.here pos); val const = Const (c, Consts.instance consts (c, Ts)); in ML_Syntax.atomic (ML_Syntax.print_term const) end))); (* object-logic judgment *) fun make_judgment ctxt = let val const = Object_Logic.judgment_const ctxt in fn t => Const const $ t end; fun dest_judgment ctxt = let val is_judgment = Object_Logic.is_judgment ctxt; val drop_judgment = Object_Logic.drop_judgment ctxt; in fn t => if is_judgment t then 
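(*For an object-logic with judgment constant J (e.g. HOL's Trueprop),
  make_judgment wraps a term as J $ t and dest_judgment strips that wrapper
  again, so dest_judgment ctxt (make_judgment ctxt t) = t; any other term
  raises TERM (illustrative summary).*)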
drop_judgment t else raise TERM ("dest_judgment", [t]) end; val _ = Theory.setup (ML_Antiquotation.value \<^binding>\make_judgment\ (Scan.succeed "ML_Antiquotations.make_judgment ML_context") #> ML_Antiquotation.value \<^binding>\dest_judgment\ (Scan.succeed "ML_Antiquotations.dest_judgment ML_context")); (* type/term constructors *) local val keywords = Keyword.add_minor_keywords ["for", "=>"] Keyword.empty_keywords; -val parse_name = Parse.input Parse.name; +val parse_name_args = + Parse.input Parse.name -- Scan.repeat Parse.embedded_ml; -val parse_args = Scan.repeat Parse.embedded_ml_underscore; -val parse_for_args = Scan.optional (Parse.$$$ "for" |-- Parse.!!! parse_args) []; +val parse_for_args = + Scan.optional (Parse.$$$ "for" |-- Parse.!!! (Scan.repeat1 Parse.embedded_ml)) []; fun parse_body b = - if b then Parse.$$$ "=>" |-- Parse.!!! Parse.embedded_input >> (ML_Lex.read_source #> single) - else Scan.succeed []; + if b then Parse.$$$ "=>" |-- Parse.!!! (Parse.embedded_ml >> single) else Scan.succeed []; fun is_dummy [Antiquote.Text tok] = ML_Lex.content_of tok = "_" | is_dummy _ = false; val ml = ML_Lex.tokenize_no_range; val ml_range = ML_Lex.tokenize_range; val ml_dummy = ml "_"; fun ml_enclose range x = ml "(" @ x @ ml_range range ")"; fun ml_parens x = ml "(" @ x @ ml ")"; fun ml_bracks x = ml "[" @ x @ ml "]"; fun ml_commas xs = flat (separate (ml ", ") xs); val ml_list = ml_bracks o ml_commas; val ml_string = ml o ML_Syntax.print_string; fun ml_pair (x, y) = ml_parens (ml_commas [x, y]); fun type_antiquotation binding {function} = ML_Context.add_antiquotation binding true (fn range => fn src => fn ctxt => let val ((s, type_args), fn_body) = src - |> Parse.read_embedded_src ctxt keywords (parse_name -- parse_args -- parse_body function); + |> Parse.read_embedded_src ctxt keywords (parse_name_args -- parse_body function); val pos = Input.pos_of s; val Type (c, Ts) = Proof_Context.read_type_name {proper = true, strict = true} ctxt (Syntax.implode_input s); val n = length Ts; val _ = length type_args = n orelse error ("Type constructor " ^ quote (Proof_Context.markup_type ctxt c) ^ " takes " ^ string_of_int n ^ " argument(s)" ^ Position.here pos); val (decls1, ctxt1) = ML_Context.expand_antiquotes_list type_args ctxt; val (decls2, ctxt2) = ML_Context.expand_antiquotes_list fn_body ctxt1; fun decl' ctxt' = let val (ml_args_env, ml_args_body) = split_list (decls1 ctxt'); val (ml_fn_env, ml_fn_body) = split_list (decls2 ctxt'); val ml1 = ml_enclose range (ml "Term.Type " @ ml_pair (ml_string c, ml_list ml_args_body)); val ml2 = if function then ml_enclose range (ml_range range "fn " @ ml1 @ ml "=> " @ flat ml_fn_body @ ml "| T => " @ ml_range range "raise" @ ml " Term.TYPE (" @ ml_string ("Type_fn " ^ quote c) @ ml ", [T], [])") else ml1; in (flat (ml_args_env @ ml_fn_env), ml2) end; in (decl', ctxt2) end); fun const_antiquotation binding {pattern, function} = ML_Context.add_antiquotation binding true (fn range => fn src => fn ctxt => let val (((s, type_args), term_args), fn_body) = src |> Parse.read_embedded_src ctxt keywords - (parse_name -- parse_args -- parse_for_args -- parse_body function); + (parse_name_args -- parse_for_args -- parse_body function); val Const (c, T) = Proof_Context.read_const {proper = true, strict = true} ctxt (Syntax.implode_input s); val consts = Proof_Context.consts_of ctxt; val type_paths = Consts.type_arguments consts c; val type_params = map Term.dest_TVar (Consts.typargs consts (c, T)); val n = length type_params; val m = length (Term.binder_types 
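(*Usage sketch for the constructor antiquotations (illustrative, with ML
  expressions A, B, t, u as embedded arguments):

    \<^Type>\<open>fun A B\<close>               Term.Type ("fun", [A, B])
    \<^Const>\<open>Pure.eq T for t u\<close>     equality constant applied to t, u
*)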
T); fun err msg = error ("Constant " ^ quote (Proof_Context.markup_const ctxt c) ^ msg ^ Position.here (Input.pos_of s)); val _ = length type_args <> n andalso err (" takes " ^ string_of_int n ^ " type argument(s)"); val _ = length term_args > m andalso Term.is_Type (Term.body_type T) andalso err (" cannot have more than " ^ string_of_int m ^ " argument(s)"); val (decls1, ctxt1) = ML_Context.expand_antiquotes_list type_args ctxt; val (decls2, ctxt2) = ML_Context.expand_antiquotes_list term_args ctxt1; val (decls3, ctxt3) = ML_Context.expand_antiquotes_list fn_body ctxt2; fun decl' ctxt' = let val (ml_args_env1, ml_args_body1) = split_list (decls1 ctxt'); val (ml_args_env2, ml_args_body2) = split_list (decls2 ctxt'); val (ml_fn_env, ml_fn_body) = split_list (decls3 ctxt'); val relevant = map is_dummy type_args ~~ type_paths; fun relevant_path is = not pattern orelse let val p = rev is in relevant |> exists (fn (u, q) => not u andalso is_prefix (op =) p q) end; val ml_typarg = the o AList.lookup (op =) (type_params ~~ ml_args_body1); fun ml_typ is (Type (d, Us)) = if relevant_path is then ml "Term.Type " @ ml_pair (ml_string d, ml_list (map_index (fn (i, U) => ml_typ (i :: is) U) Us)) else ml_dummy | ml_typ is (TVar arg) = if relevant_path is then ml_typarg arg else ml_dummy | ml_typ _ (TFree _) = raise Match; fun ml_app [] = ml "Term.Const " @ ml_pair (ml_string c, ml_typ [] T) | ml_app (u :: us) = ml "Term.$ " @ ml_pair (ml_app us, u); val ml_env = flat (ml_args_env1 @ ml_args_env2 @ ml_fn_env); val ml1 = ml_enclose range (ml_app (rev ml_args_body2)); val ml2 = if function then ml_enclose range (ml_range range "fn " @ ml1 @ ml "=> " @ flat ml_fn_body @ ml "| t => " @ ml_range range "raise" @ ml " Term.TERM (" @ ml_string ("Const_fn " ^ quote c) @ ml ", [t])") else ml1; in (ml_env, ml2) end; in (decl', ctxt3) end); val _ = Theory.setup (type_antiquotation \<^binding>\Type\ {function = false} #> type_antiquotation \<^binding>\Type_fn\ {function = true} #> const_antiquotation \<^binding>\Const\ {pattern = false, function = false} #> const_antiquotation \<^binding>\Const_\ {pattern = true, function = false} #> const_antiquotation \<^binding>\Const_fn\ {pattern = true, function = true}); in end; (* special forms *) val _ = Theory.setup (ML_Antiquotation.special_form \<^binding>\try\ "() |> Basics.try" #> ML_Antiquotation.special_form \<^binding>\can\ "() |> Basics.can"); (* basic combinators *) local val parameter = Parse.position Parse.nat >> (fn (n, pos) => if n > 1 then n else error ("Bad parameter: " ^ string_of_int n ^ Position.here pos)); fun indices n = map string_of_int (1 upto n); fun empty n = replicate_string n " []"; fun dummy n = replicate_string n " _"; fun vars x n = implode (map (fn a => " " ^ x ^ a) (indices n)); fun cons n = implode (map (fn a => " (x" ^ a ^ " :: xs" ^ a ^ ")") (indices n)); val tuple = enclose "(" ")" o commas; fun tuple_empty n = tuple (replicate n "[]"); fun tuple_vars x n = tuple (map (fn a => x ^ a) (indices n)); fun tuple_cons n = "(" ^ tuple_vars "x" n ^ " :: xs)" fun cons_tuple n = tuple (map (fn a => "x" ^ a ^ " :: xs" ^ a) (indices n)); in val _ = Theory.setup (ML_Antiquotation.value \<^binding>\map\ (Scan.lift parameter >> (fn n => "fn f =>\n\ \ let\n\ \ fun map _" ^ empty n ^ " = []\n\ \ | map f" ^ cons n ^ " = f" ^ vars "x" n ^ " :: map f" ^ vars "xs" n ^ "\n\ \ | map _" ^ dummy n ^ " = raise ListPair.UnequalLengths\n" ^ " in map f end")) #> ML_Antiquotation.value \<^binding>\fold\ (Scan.lift parameter >> (fn n => "fn f =>\n\ \ let\n\ \ fun fold 
_" ^ empty n ^ " a = a\n\ \ | fold f" ^ cons n ^ " a = fold f" ^ vars "xs" n ^ " (f" ^ vars "x" n ^ " a)\n\ \ | fold _" ^ dummy n ^ " _ = raise ListPair.UnequalLengths\n" ^ " in fold f end")) #> ML_Antiquotation.value \<^binding>\fold_map\ (Scan.lift parameter >> (fn n => "fn f =>\n\ \ let\n\ \ fun fold_map _" ^ empty n ^ " a = ([], a)\n\ \ | fold_map f" ^ cons n ^ " a =\n\ \ let\n\ \ val (x, a') = f" ^ vars "x" n ^ " a\n\ \ val (xs, a'') = fold_map f" ^ vars "xs" n ^ " a'\n\ \ in (x :: xs, a'') end\n\ \ | fold_map _" ^ dummy n ^ " _ = raise ListPair.UnequalLengths\n" ^ " in fold_map f end")) #> ML_Antiquotation.value \<^binding>\split_list\ (Scan.lift parameter >> (fn n => "fn list =>\n\ \ let\n\ \ fun split_list [] =" ^ tuple_empty n ^ "\n\ \ | split_list" ^ tuple_cons n ^ " =\n\ \ let val" ^ tuple_vars "xs" n ^ " = split_list xs\n\ \ in " ^ cons_tuple n ^ "end\n\ \ in split_list list end")) #> ML_Antiquotation.value \<^binding>\apply\ (Scan.lift (parameter -- Scan.option (Args.parens (Parse.position Parse.nat))) >> (fn (n, opt_index) => let val cond = (case opt_index of NONE => K true | SOME (index, index_pos) => if 1 <= index andalso index <= n then equal (string_of_int index) else error ("Bad index: " ^ string_of_int index ^ Position.here index_pos)); in "fn f => fn " ^ tuple_vars "x" n ^ " => " ^ tuple (map (fn a => (if cond a then "f x" else "x") ^ a) (indices n)) end))); end; (* outer syntax *) val _ = Theory.setup (ML_Antiquotation.value_embedded \<^binding>\keyword\ (Args.context -- Scan.lift (Parse.embedded_position || Parse.position (Parse.keyword_with (K true))) >> (fn (ctxt, (name, pos)) => if Keyword.is_keyword (Thy_Header.get_keywords' ctxt) name then (Context_Position.report ctxt pos (Token.keyword_markup (true, Markup.keyword2) name); "Parse.$$$ " ^ ML_Syntax.print_string name) else error ("Bad outer syntax keyword " ^ quote name ^ Position.here pos))) #> ML_Antiquotation.value_embedded \<^binding>\command_keyword\ (Args.context -- Scan.lift Parse.embedded_position >> (fn (ctxt, (name, pos)) => (case Keyword.command_markup (Thy_Header.get_keywords' ctxt) name of SOME markup => (Context_Position.reports ctxt [(pos, markup), (pos, Markup.keyword1)]; ML_Syntax.print_pair ML_Syntax.print_string ML_Syntax.print_position (name, pos)) | NONE => error ("Bad outer syntax command " ^ quote name ^ Position.here pos))))); end; diff --git a/src/Pure/ML/ml_statistics.scala b/src/Pure/ML/ml_statistics.scala --- a/src/Pure/ML/ml_statistics.scala +++ b/src/Pure/ML/ml_statistics.scala @@ -1,319 +1,324 @@ /* Title: Pure/ML/ml_statistics.scala Author: Makarius ML runtime statistics. 
*/ package isabelle import scala.annotation.tailrec import scala.collection.mutable import scala.collection.immutable.{SortedSet, SortedMap} import scala.swing.{Frame, Component} import org.jfree.data.xy.{XYSeries, XYSeriesCollection} import org.jfree.chart.{JFreeChart, ChartPanel, ChartFactory} import org.jfree.chart.plot.PlotOrientation object ML_Statistics { /* properties */ val Now = new Properties.Double("now") def now(props: Properties.T): Double = Now.unapply(props).get /* memory status */ val Heap_Size = new Properties.Long("size_heap") val Heap_Free = new Properties.Long("size_heap_free_last_GC") val GC_Percent = new Properties.Int("GC_percent") sealed case class Memory_Status(heap_size: Long, heap_free: Long, gc_percent: Int) { def heap_used: Long = (heap_size - heap_free) max 0 def heap_used_fraction: Double = if (heap_size == 0) 0.0 else heap_used.toDouble / heap_size def gc_progress: Option[Double] = if (1 <= gc_percent && gc_percent <= 100) Some((gc_percent - 1) * 0.01) else None } def memory_status(props: Properties.T): Memory_Status = { val heap_size = Heap_Size.get(props) val heap_free = Heap_Free.get(props) val gc_percent = GC_Percent.get(props) Memory_Status(heap_size, heap_free, gc_percent) } /* monitor process */ def monitor(pid: Long, stats_dir: String = "", delay: Time = Time.seconds(0.5), consume: Properties.T => Unit = Console.println): Unit = { def progress_stdout(line: String): Unit = { val props = Library.space_explode(',', line).flatMap(Properties.Eq.unapply) if (props.nonEmpty) consume(props) } val env_prefix = if (stats_dir.isEmpty) "" else "export POLYSTATSDIR=" + Bash.string(stats_dir) + "\n" Bash.process(env_prefix + "\"$POLYML_EXE\" -q --use src/Pure/ML/ml_statistics.ML --eval " + Bash.string("ML_Statistics.monitor " + ML_Syntax.print_long(pid) + " " + ML_Syntax.print_double(delay.seconds)), cwd = Path.ISABELLE_HOME.file) .result(progress_stdout = progress_stdout, strict = false).check } /* protocol handler */ class Handler extends Session.Protocol_Handler { private var session: Session = null private var monitoring: Future[Unit] = Future.value(()) override def init(session: Session): Unit = synchronized { this.session = session } override def exit(): Unit = synchronized { session = null monitoring.cancel() } private def consume(props: Properties.T): Unit = synchronized { if (session != null) { val props1 = (session.cache.props(props ::: Java_Statistics.jvm_statistics())) session.runtime_statistics.post(Session.Runtime_Statistics(props1)) } } private def ml_statistics(msg: Prover.Protocol_Output): Boolean = synchronized { msg.properties match { case Markup.ML_Statistics(pid, stats_dir) => monitoring = Future.thread("ML_statistics") { monitor(pid, stats_dir = stats_dir, consume = consume) } true case _ => false } } override val functions = List(Markup.ML_Statistics.name -> ml_statistics) } /* memory fields (mega bytes) */ def mem_print(x: Long): Option[String] = if (x == 0L) None else Some(x.toString + " M") def mem_scale(x: Long): Long = x / 1024 / 1024 def mem_field_scale(name: String, x: Double): Double = if (heap_fields._2.contains(name) || program_fields._2.contains(name)) mem_scale(x.toLong).toDouble else x val CODE_SIZE = "size_code" val STACK_SIZE = "size_stacks" val HEAP_SIZE = "size_heap" /* standard fields */ type Fields = (String, List[String]) val tasks_fields: Fields = ("Future tasks", List("tasks_ready", "tasks_pending", "tasks_running", "tasks_passive", "tasks_urgent", "tasks_total")) val workers_fields: Fields = ("Worker threads", 
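/* Monitoring sketch (hypothetical pid): print the statistics of a running
   Poly/ML process once per second, using the defaults for the remaining
   parameters:

     ML_Statistics.monitor(1234L, delay = Time.seconds(1.0))
*/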
List("workers_total", "workers_active", "workers_waiting")) val GC_fields: Fields = ("GCs", List("partial_GCs", "full_GCs", "share_passes")) val heap_fields: Fields = ("Heap", List(HEAP_SIZE, "size_allocation", "size_allocation_free", "size_heap_free_last_full_GC", "size_heap_free_last_GC")) val program_fields: Fields = ("Program", List("size_code", "size_stacks")) val threads_fields: Fields = ("Threads", List("threads_total", "threads_in_ML", "threads_wait_condvar", "threads_wait_IO", "threads_wait_mutex", "threads_wait_signal")) val time_fields: Fields = ("Time", List("time_elapsed", "time_elapsed_GC", "time_CPU", "time_GC")) val speed_fields: Fields = ("Speed", List("speed_CPU", "speed_GC")) private val time_speed = Map("time_CPU" -> "speed_CPU", "time_GC" -> "speed_GC") val java_heap_fields: Fields = ("Java heap", List("java_heap_size", "java_heap_used")) val java_thread_fields: Fields = ("Java threads", List("java_threads_total", "java_workers_total", "java_workers_active")) val main_fields: List[Fields] = List(heap_fields, tasks_fields, workers_fields) val other_fields: List[Fields] = List(threads_fields, GC_fields, program_fields, time_fields, speed_fields, java_heap_fields, java_thread_fields) val all_fields: List[Fields] = main_fields ::: other_fields /* content interpretation */ final case class Entry(time: Double, data: Map[String, Double]) { def get(field: String): Double = data.getOrElse(field, 0.0) } val empty: ML_Statistics = apply(Nil) def apply(ml_statistics0: List[Properties.T], heading: String = "", - domain: String => Boolean = (key: String) => true): ML_Statistics = + domain: String => Boolean = _ => true): ML_Statistics = { require(ml_statistics0.forall(props => Now.unapply(props).isDefined), "missing \"now\" field") val ml_statistics = ml_statistics0.sortBy(now) val time_start = if (ml_statistics.isEmpty) 0.0 else now(ml_statistics.head) val duration = if (ml_statistics.isEmpty) 0.0 else now(ml_statistics.last) - time_start val fields = SortedSet.empty[String] ++ (for { props <- ml_statistics.iterator (x, _) <- props.iterator if x != Now.name && domain(x) } yield x) val content = { var last_edge = Map.empty[String, (Double, Double, Double)] val result = new mutable.ListBuffer[ML_Statistics.Entry] for (props <- ml_statistics) { val time = now(props) - time_start // rising edges -- relative speed val speeds = (for { (key, value) <- props.iterator key1 <- time_speed.get(key) if domain(key1) } yield { val (x0, y0, s0) = last_edge.getOrElse(key, (0.0, 0.0, 0.0)) val x1 = time val y1 = java.lang.Double.parseDouble(value) val s1 = if (x1 == x0) 0.0 else (y1 - y0) / (x1 - x0) if (y1 > y0) { last_edge += (key -> (x1, y1, s1)) (key1, s1.toString) } else (key1, s0.toString) }).toList val data = SortedMap.empty[String, Double] ++ (for { (x, y) <- props.iterator ++ speeds.iterator if x != Now.name && domain(x) z = java.lang.Double.parseDouble(y) if z != 0.0 } yield { (x.intern, mem_field_scale(x, z)) }) result += ML_Statistics.Entry(time, data) } result.toList } new ML_Statistics(heading, fields, content, time_start, duration) } } final class ML_Statistics private( val heading: String, val fields: Set[String], val content: List[ML_Statistics.Entry], val time_start: Double, val duration: Double) { + override def toString: String = + if (content.isEmpty) "ML_Statistics.empty" + else "ML_Statistics(length = " + content.length + ", fields = " + fields.size + ")" + + /* content */ def maximum(field: String): Double = content.foldLeft(0.0) { case (m, e) => m max e.get(field) } def 
average(field: String): Double = { @tailrec def sum(t0: Double, list: List[ML_Statistics.Entry], acc: Double): Double = list match { case Nil => acc case e :: es => val t = e.time sum(t, es, (t - t0) * e.get(field) + acc) } content match { case Nil => 0.0 case List(e) => e.get(field) case e :: es => sum(e.time, es, 0.0) / duration } } /* charts */ def update_data(data: XYSeriesCollection, selected_fields: List[String]): Unit = { - data.removeAllSeries + data.removeAllSeries() for (field <- selected_fields) { val series = new XYSeries(field) content.foreach(entry => series.add(entry.time, entry.get(field))) data.addSeries(series) } } def chart(title: String, selected_fields: List[String]): JFreeChart = { val data = new XYSeriesCollection update_data(data, selected_fields) ChartFactory.createXYLineChart(title, "time", "value", data, PlotOrientation.VERTICAL, true, true, true) } def chart(fields: ML_Statistics.Fields): JFreeChart = chart(fields._1, fields._2) def show_frames(fields: List[ML_Statistics.Fields] = ML_Statistics.main_fields): Unit = fields.map(chart).foreach(c => GUI_Thread.later { new Frame { iconImage = GUI.isabelle_image() title = heading contents = Component.wrap(new ChartPanel(c)) visible = true } }) } diff --git a/src/Pure/PIDE/command.ML b/src/Pure/PIDE/command.ML --- a/src/Pure/PIDE/command.ML +++ b/src/Pure/PIDE/command.ML @@ -1,512 +1,504 @@ (* Title: Pure/PIDE/command.ML Author: Makarius Prover command execution: read -- eval -- print. *) signature COMMAND = sig type blob = {file_node: string, src_path: Path.T, content: (SHA1.digest * string list) option} val read_file: Path.T -> Position.T -> bool -> Path.T -> Token.file val read_thy: Toplevel.state -> theory val read: Keyword.keywords -> theory -> Path.T-> (unit -> theory) -> blob Exn.result list * int -> Token.T list -> Toplevel.transition val read_span: Keyword.keywords -> Toplevel.state -> Path.T -> (unit -> theory) -> Command_Span.span -> Toplevel.transition type eval val eval_command_id: eval -> Document_ID.command val eval_exec_id: eval -> Document_ID.exec val eval_eq: eval * eval -> bool val eval_running: eval -> bool val eval_finished: eval -> bool val eval_result_command: eval -> Toplevel.transition val eval_result_state: eval -> Toplevel.state val eval: Keyword.keywords -> Path.T -> (unit -> theory) -> blob Exn.result list * int -> Document_ID.command -> Token.T list -> eval -> eval type print type print_fn = Toplevel.transition -> Toplevel.state -> unit val print0: {pri: int, print_fn: print_fn} -> eval -> print val print: bool -> (string * string list) list -> Keyword.keywords -> string -> eval -> print list -> print list option val parallel_print: print -> bool type print_function = {keywords: Keyword.keywords, command_name: string, args: string list, exec_id: Document_ID.exec} -> {delay: Time.time option, pri: int, persistent: bool, strict: bool, print_fn: print_fn} option val print_function: string -> print_function -> unit val no_print_function: string -> unit type exec = eval * print list val init_exec: theory option -> exec val no_exec: exec val exec_ids: exec option -> Document_ID.exec list val exec: Document_ID.execution -> exec -> unit val exec_parallel_prints: Document_ID.execution -> Future.task list -> exec -> exec option end; structure Command: COMMAND = struct (** main phases of execution **) fun task_context group f = f |> Future.interruptible_task |> Future.task_context "Command.run_process" group; (* read *) type blob = {file_node: string, src_path: Path.T, content: (SHA1.digest * string 
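(*A blob is an auxiliary file attached to a command span, e.g. the argument
  of a load command such as ML_file: content = NONE means the file still has
  to be read from the file system (see read_file_node below), SOME provides
  digest and lines directly from the PIDE front-end.*)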
list) option}; fun read_file_node file_node master_dir pos delimited src_path = let val _ = if Context_Position.pide_reports () then Position.report pos (Markup.language_path delimited) else (); fun read_file () = let val path = File.check_file (File.full_path master_dir src_path); val text = File.read path; val file_pos = Path.position path; in (text, file_pos) end; fun read_url () = let val text = Isabelle_System.download file_node; val file_pos = Position.file file_node; in (text, file_pos) end; val (text, file_pos) = (case try Url.explode file_node of NONE => read_file () | SOME (Url.File _) => read_file () | _ => read_url ()); val lines = split_lines text; val digest = SHA1.digest text; in {src_path = src_path, lines = lines, digest = digest, pos = Position.copy_id pos file_pos} end handle ERROR msg => error (msg ^ Position.here pos); val read_file = read_file_node ""; local fun blob_file src_path lines digest file_node = let val file_pos = Position.file file_node |> (case Position.id_of (Position.thread_data ()) of NONE => I | SOME exec_id => Position.put_id exec_id); in {src_path = src_path, lines = lines, digest = digest, pos = file_pos} end fun resolve_files master_dir (blobs, blobs_index) toks = (case Outer_Syntax.parse_spans toks of [Command_Span.Span (Command_Span.Command_Span _, _)] => (case try (nth toks) blobs_index of SOME tok => let val source = Token.input_of tok; val pos = Input.pos_of source; val delimited = Input.is_delimited source; fun make_file (Exn.Res {file_node, src_path, content = NONE}) = Exn.interruptible_capture (fn () => read_file_node file_node master_dir pos delimited src_path) () | make_file (Exn.Res {file_node, src_path, content = SOME (digest, lines)}) = (Position.report pos (Markup.language_path delimited); Exn.Res (blob_file src_path lines digest file_node)) | make_file (Exn.Exn e) = Exn.Exn e; val files = map make_file blobs; in toks |> map_index (fn (i, tok) => if i = blobs_index then Token.put_files files tok else tok) end | NONE => toks) | _ => toks); fun reports_of_token keywords tok = let val malformed_symbols = Input.source_explode (Token.input_of tok) |> map_filter (fn (sym, pos) => if Symbol.is_malformed sym then SOME ((pos, Markup.bad ()), "Malformed symbolic character") else NONE); val is_malformed = Token.is_error tok orelse not (null malformed_symbols); val reports = Token.reports keywords tok @ Token.completion_report tok @ malformed_symbols; in (is_malformed, reports) end; in fun read_thy st = Toplevel.theory_of st handle Toplevel.UNDEF => Pure_Syn.bootstrap_thy; fun read keywords thy master_dir init blobs_info span = let val command_reports = Outer_Syntax.command_reports thy; val token_reports = map (reports_of_token keywords) span; val _ = Position.reports_text (maps #2 token_reports @ maps command_reports span); - val verbatim = - span |> map_filter (fn tok => - if Token.kind_of tok = Token.Verbatim then SOME (Token.pos_of tok) else NONE); - val _ = - if null verbatim then () - else legacy_feature ("Old-style {* verbatim *} token -- use \cartouche\ instead" ^ - Position.here_list verbatim); - val core_range = Token.core_range_of span; val tr = if exists #1 token_reports then Toplevel.malformed (#1 core_range) "Malformed command syntax" else Outer_Syntax.parse_span thy init (resolve_files master_dir blobs_info span); val _ = if Toplevel.is_ignored tr orelse Toplevel.is_malformed tr then () else Position.report (#1 core_range) (Markup.command_span (Toplevel.name_of tr)); in tr end; end; fun read_span keywords st master_dir init = 
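(*read_span: parse a single command span into a toplevel transition,
  resolving auxiliary files relative to master_dir; spans with malformed
  tokens yield Toplevel.malformed instead of a parsed transition (summary
  of "read" above).*)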
Command_Span.content #> read keywords (read_thy st) master_dir init ([], ~1); (* eval *) type eval_state = {failed: bool, command: Toplevel.transition, state: Toplevel.state}; fun init_eval_state opt_thy = {failed = false, command = Toplevel.empty, state = (case opt_thy of NONE => Toplevel.init_toplevel () | SOME thy => Toplevel.theory_toplevel thy)}; datatype eval = Eval of {command_id: Document_ID.command, exec_id: Document_ID.exec, eval_process: eval_state lazy}; fun eval_command_id (Eval {command_id, ...}) = command_id; fun eval_exec_id (Eval {exec_id, ...}) = exec_id; val eval_eq = op = o apply2 eval_exec_id; val eval_running = Execution.is_running_exec o eval_exec_id; fun eval_finished (Eval {eval_process, ...}) = Lazy.is_finished eval_process; fun eval_result (Eval {eval_process, ...}) = Exn.release (Lazy.finished_result eval_process); val eval_result_command = #command o eval_result; val eval_result_state = #state o eval_result; local fun reset_state keywords tr st0 = Toplevel.setmp_thread_position tr (fn () => let val name = Toplevel.name_of tr; val res = if Keyword.is_theory_body keywords name then Toplevel.reset_theory st0 else if Keyword.is_proof keywords name then Toplevel.reset_proof st0 else if Keyword.is_theory_end keywords name then (case Toplevel.reset_notepad st0 of NONE => Toplevel.reset_theory st0 | some => some) else NONE; in (case res of NONE => st0 | SOME st => (Output.error_message (Toplevel.type_error tr ^ " -- using reset state"); st)) end) (); fun run keywords int tr st = if Future.proofs_enabled 1 andalso Keyword.is_diag keywords (Toplevel.name_of tr) then let val (tr1, tr2) = Toplevel.fork_presentation tr; val _ = Execution.fork {name = "Toplevel.diag", pos = Toplevel.pos_of tr, pri = ~1} (fn () => Toplevel.command_exception int tr1 st); in Toplevel.command_errors int tr2 st end else Toplevel.command_errors int tr st; fun check_token_comments ctxt tok = (Document_Output.check_comments ctxt (Input.source_explode (Token.input_of tok)); []) handle exn => if Exn.is_interrupt exn then Exn.reraise exn else Runtime.exn_messages exn; fun check_span_comments ctxt span tr = Toplevel.setmp_thread_position tr (fn () => maps (check_token_comments ctxt) span) (); fun report_indent tr st = (case try Toplevel.proof_of st of SOME prf => let val keywords = Thy_Header.get_keywords (Proof.theory_of prf) in if Keyword.command_kind keywords (Toplevel.name_of tr) = SOME Keyword.prf_script then (case try (Thm.nprems_of o #goal o Proof.goal) prf of NONE => () | SOME 0 => () | SOME n => let val report = Markup.markup_only (Markup.command_indent (n - 1)); in Toplevel.setmp_thread_position tr (fn () => Output.report [report]) () end) else () end | NONE => ()); fun status tr m = Toplevel.setmp_thread_position tr (fn () => Output.status [Markup.markup_only m]) (); fun eval_state keywords span tr ({state, ...}: eval_state) = let val _ = Thread_Attributes.expose_interrupt (); val st = reset_state keywords tr state; val _ = report_indent tr st; val _ = status tr Markup.running; val (errs1, result) = run keywords true tr st; val errs2 = (case result of NONE => [] | SOME st' => check_span_comments (Toplevel.presentation_context st') span tr); val errs = errs1 @ errs2; val _ = List.app (Future.error_message (Toplevel.pos_of tr)) errs; in (case result of NONE => let val _ = status tr Markup.failed; val _ = status tr Markup.finished; val _ = if null errs then (status tr Markup.canceled; Exn.interrupt ()) else (); in {failed = true, command = tr, state = st} end | SOME st' => let val _ = if 
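(*Status protocol of a single command evaluation (summary): "running" is
  reported before execution; failure reports "failed" followed by "finished"
  ("canceled" in the interrupt case); success reports "finished", preceded
  by "finalized" when a theory is successfully ended.*)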
Keyword.is_theory_end keywords (Toplevel.name_of tr) andalso can (Toplevel.end_theory Position.none) st' then status tr Markup.finalized else (); val _ = status tr Markup.finished; in {failed = false, command = tr, state = st'} end) end; in fun eval keywords master_dir init blobs_info command_id span eval0 = let val exec_id = Document_ID.make (); fun process () = let val eval_state0 = eval_result eval0; val thy = read_thy (#state eval_state0); val tr = Position.setmp_thread_data (Position.id_only (Document_ID.print exec_id)) (fn () => read keywords thy master_dir init blobs_info span |> Toplevel.exec_id exec_id) (); in eval_state keywords span tr eval_state0 end; in Eval {command_id = command_id, exec_id = exec_id, eval_process = Lazy.lazy_name "Command.eval" process} end; end; (* print *) datatype print = Print of {name: string, args: string list, delay: Time.time option, pri: int, persistent: bool, exec_id: Document_ID.exec, print_process: unit lazy}; fun print_exec_id (Print {exec_id, ...}) = exec_id; val print_eq = op = o apply2 print_exec_id; type print_fn = Toplevel.transition -> Toplevel.state -> unit; type print_function = {keywords: Keyword.keywords, command_name: string, args: string list, exec_id: Document_ID.exec} -> {delay: Time.time option, pri: int, persistent: bool, strict: bool, print_fn: print_fn} option; local val print_functions = Synchronized.var "Command.print_functions" ([]: (string * print_function) list); fun print_error tr opt_context e = (Toplevel.setmp_thread_position tr o Runtime.controlled_execution opt_context) e () handle exn => if Exn.is_interrupt exn then Exn.reraise exn else List.app (Future.error_message (Toplevel.pos_of tr)) (Runtime.exn_messages exn); fun print_finished (Print {print_process, ...}) = Lazy.is_finished print_process; fun print_persistent (Print {persistent, ...}) = persistent; val overlay_ord = prod_ord string_ord (list_ord string_ord); fun make_print (name, args) {delay, pri, persistent, strict, print_fn} eval = let val exec_id = Document_ID.make (); fun process () = let val {failed, command, state = st', ...} = eval_result eval; val tr = Toplevel.exec_id exec_id command; val opt_context = try Toplevel.generic_theory_of st'; in if failed andalso strict then () else print_error tr opt_context (fn () => print_fn tr st') end; in Print { name = name, args = args, delay = delay, pri = pri, persistent = persistent, exec_id = exec_id, print_process = Lazy.lazy_name "Command.print" process} end; fun bad_print name_args exn = make_print name_args {delay = NONE, pri = 0, persistent = false, strict = false, print_fn = fn _ => fn _ => Exn.reraise exn}; in fun print0 {pri, print_fn} = make_print ("", [serial_string ()]) {delay = NONE, pri = pri, persistent = true, strict = true, print_fn = print_fn}; fun print command_visible command_overlays keywords command_name eval old_prints = let val print_functions = Synchronized.value print_functions; fun new_print (name, args) get_pr = let val params = {keywords = keywords, command_name = command_name, args = args, exec_id = eval_exec_id eval}; in (case Exn.capture (Runtime.controlled_execution NONE get_pr) params of Exn.Res NONE => NONE | Exn.Res (SOME pr) => SOME (make_print (name, args) pr eval) | Exn.Exn exn => SOME (bad_print (name, args) exn eval)) end; fun get_print (name, args) = (case find_first (fn Print print => (#name print, #args print) = (name, args)) old_prints of NONE => (case AList.lookup (op =) print_functions name of NONE => SOME (bad_print (name, args) (ERROR ("Missing print function " ^ 
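(*Registration sketch (hypothetical name "my_tool"): a print function reacts
  to the result of command evaluation, e.g.

    Command.print_function "my_tool" (fn {command_name, ...} =>
      SOME {delay = NONE, pri = 1, persistent = false, strict = true,
        print_fn = fn _ => fn _ => Output.writeln command_name});
*)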
quote name)) eval) | SOME get_pr => new_print (name, args) get_pr) | some => some); val retained_prints = filter (fn print => print_finished print andalso print_persistent print) old_prints; val visible_prints = if command_visible then fold (fn (name, _) => cons (name, [])) print_functions command_overlays |> sort_distinct overlay_ord |> map_filter get_print else []; val new_prints = visible_prints @ retained_prints; in if eq_list print_eq (old_prints, new_prints) then NONE else SOME new_prints end; fun parallel_print (Print {pri, ...}) = pri <= 0 orelse (Future.enabled () andalso Options.default_bool "parallel_print"); fun print_function name f = Synchronized.change print_functions (fn funs => (if name = "" then error "Unnamed print function" else (); if not (AList.defined (op =) funs name) then () else warning ("Redefining command print function: " ^ quote name); AList.update (op =) (name, f) funs)); fun no_print_function name = Synchronized.change print_functions (filter_out (equal name o #1)); end; val _ = print_function "Execution.print" (fn {args, exec_id, ...} => if null args then SOME {delay = NONE, pri = Task_Queue.urgent_pri + 2, persistent = false, strict = false, print_fn = fn _ => fn _ => Execution.fork_prints exec_id} else NONE); val _ = print_function "print_state" (fn {keywords, command_name, ...} => if Options.default_bool "editor_output_state" andalso Keyword.is_printed keywords command_name then SOME {delay = NONE, pri = Task_Queue.urgent_pri + 1, persistent = false, strict = false, print_fn = fn _ => fn st => if Toplevel.is_proof st then Output.state (Toplevel.string_of_state st) else ()} else NONE); (* combined execution *) type exec = eval * print list; fun init_exec opt_thy : exec = (Eval {command_id = Document_ID.none, exec_id = Document_ID.none, eval_process = Lazy.value (init_eval_state opt_thy)}, []); val no_exec = init_exec NONE; fun exec_ids NONE = [] | exec_ids (SOME (eval, prints)) = eval_exec_id eval :: map print_exec_id prints; local fun run_process execution_id exec_id process = let val group = Future.worker_subgroup () in if Execution.running execution_id exec_id [group] then ignore (task_context group (fn () => Lazy.force_result {strict = true} process) ()) else () end; fun ignore_process process = Lazy.is_running process orelse Lazy.is_finished process; fun run_eval execution_id (Eval {exec_id, eval_process, ...}) = if Lazy.is_finished eval_process then () else run_process execution_id exec_id eval_process; fun fork_print execution_id deps (Print {name, delay, pri, exec_id, print_process, ...}) = let val group = Future.worker_subgroup (); fun fork () = ignore ((singleton o Future.forks) {name = name, group = SOME group, deps = deps, pri = pri, interrupts = true} (fn () => if ignore_process print_process then () else run_process execution_id exec_id print_process)); in (case delay of NONE => fork () | SOME d => ignore (Event_Timer.request {physical = true} (Time.now () + d) fork)) end; fun run_print execution_id (print as Print {exec_id, print_process, ...}) = if ignore_process print_process then () else if parallel_print print then fork_print execution_id [] print else run_process execution_id exec_id print_process; in fun exec execution_id (eval, prints) = (run_eval execution_id eval; List.app (run_print execution_id) prints); fun exec_parallel_prints execution_id deps (exec as (Eval {eval_process, ...}, prints)) = if Lazy.is_finished eval_process then (List.app (fork_print execution_id deps) prints; NONE) else SOME exec; end; end; diff --git 
a/src/Pure/PIDE/markup.scala b/src/Pure/PIDE/markup.scala --- a/src/Pure/PIDE/markup.scala +++ b/src/Pure/PIDE/markup.scala @@ -1,794 +1,793 @@ /* Title: Pure/PIDE/markup.scala Author: Makarius Quasi-abstract markup elements. */ package isabelle object Markup { /* elements */ object Elements { def apply(elems: Set[String]): Elements = new Elements(elems) def apply(elems: String*): Elements = apply(Set(elems: _*)) val empty: Elements = apply() val full: Elements = new Elements(Set.empty) { override def apply(elem: String): Boolean = true override def toString: String = "Elements.full" } } sealed class Elements private[Markup](private val rep: Set[String]) { def apply(elem: String): Boolean = rep.contains(elem) def + (elem: String): Elements = new Elements(rep + elem) def ++ (elems: Elements): Elements = new Elements(rep ++ elems.rep) def - (elem: String): Elements = new Elements(rep - elem) def -- (elems: Elements): Elements = new Elements(rep -- elems.rep) override def toString: String = rep.mkString("Elements(", ",", ")") } /* properties */ val NAME = "name" val Name = new Properties.String(NAME) val XNAME = "xname" val XName = new Properties.String(XNAME) val KIND = "kind" val Kind = new Properties.String(KIND) val CONTENT = "content" val Content = new Properties.String(CONTENT) val SERIAL = "serial" val Serial = new Properties.Long(SERIAL) val INSTANCE = "instance" val Instance = new Properties.String(INSTANCE) /* basic markup */ val Empty: Markup = Markup("", Nil) val Broken: Markup = Markup("broken", Nil) class Markup_Elem(val name: String) { def apply(props: Properties.T = Nil): Markup = Markup(name, props) def unapply(markup: Markup): Option[Properties.T] = if (markup.name == name) Some(markup.properties) else None } class Markup_String(val name: String, prop: String) { val Prop: Properties.String = new Properties.String(prop) def apply(s: String): Markup = Markup(name, Prop(s)) def unapply(markup: Markup): Option[String] = if (markup.name == name) Prop.unapply(markup.properties) else None def get(markup: Markup): String = unapply(markup).getOrElse("") } class Markup_Int(val name: String, prop: String) { val Prop: Properties.Int = new Properties.Int(prop) def apply(i: Int): Markup = Markup(name, Prop(i)) def unapply(markup: Markup): Option[Int] = if (markup.name == name) Prop.unapply(markup.properties) else None def get(markup: Markup): Int = unapply(markup).getOrElse(0) } class Markup_Long(val name: String, prop: String) { val Prop: Properties.Long = new Properties.Long(prop) def apply(i: Long): Markup = Markup(name, Prop(i)) def unapply(markup: Markup): Option[Long] = if (markup.name == name) Prop.unapply(markup.properties) else None def get(markup: Markup): Long = unapply(markup).getOrElse(0) } /* meta data */ val META_TITLE = "meta_title" val META_CREATOR = "meta_creator" val META_CONTRIBUTOR = "meta_contributor" val META_DATE = "meta_date" val META_LICENSE = "meta_license" val META_DESCRIPTION = "meta_description" /* formal entities */ val BINDING = "binding" val ENTITY = "entity" object Entity { val Def = new Markup_Long(ENTITY, "def") val Ref = new Markup_Long(ENTITY, "ref") object Occ { def unapply(markup: Markup): Option[Long] = Def.unapply(markup) orElse Ref.unapply(markup) } def unapply(markup: Markup): Option[(String, String)] = markup match { case Markup(ENTITY, props) => Some((Kind.get(props), Name.get(props))) case _ => None } } /* completion */ val COMPLETION = "completion" val NO_COMPLETION = "no_completion" val UPDATE = "update" /* position */ val LINE = "line" val 
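/* Helper-class sketch (hypothetical instance): Markup_String couples a
   markup name with one string property, e.g.

     val Foo = new Markup_String("foo", "name")
     Foo("bar") == Markup("foo", List(("name", "bar")))
*/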
END_LINE = "line" val OFFSET = "offset" val END_OFFSET = "end_offset" val FILE = "file" val ID = "id" val DEF_LINE = "def_line" val DEF_OFFSET = "def_offset" val DEF_END_OFFSET = "def_end_offset" val DEF_FILE = "def_file" val DEF_ID = "def_id" val DEF_THEORY = "def_theory" val POSITION = "position" val POSITION_PROPERTIES = Set(LINE, OFFSET, END_OFFSET, FILE, ID) def position_property(entry: Properties.Entry): Boolean = POSITION_PROPERTIES(entry._1) /* position "def" name */ private val def_names: Map[String, String] = Map(LINE -> DEF_LINE, OFFSET -> DEF_OFFSET, END_OFFSET -> DEF_END_OFFSET, FILE -> DEF_FILE, ID -> DEF_ID) def def_name(a: String): String = def_names.getOrElse(a, a + "def_") /* expression */ val EXPRESSION = "expression" object Expression { def unapply(markup: Markup): Option[String] = markup match { case Markup(EXPRESSION, props) => Some(Kind.get(props)) case _ => None } } /* citation */ val CITATION = "citation" val Citation = new Markup_String(CITATION, NAME) /* embedded languages */ val Symbols = new Properties.Boolean("symbols") val Antiquotes = new Properties.Boolean("antiquotes") val Delimited = new Properties.Boolean("delimited") val LANGUAGE = "language" object Language { val DOCUMENT = "document" val ML = "ML" val SML = "SML" val PATH = "path" val UNKNOWN = "unknown" def unapply(markup: Markup): Option[(String, Boolean, Boolean, Boolean)] = markup match { case Markup(LANGUAGE, props) => (props, props, props, props) match { case (Name(name), Symbols(symbols), Antiquotes(antiquotes), Delimited(delimited)) => Some((name, symbols, antiquotes, delimited)) case _ => None } case _ => None } object Path { def unapply(markup: Markup): Option[Boolean] = markup match { case Language(PATH, _, _, delimited) => Some(delimited) case _ => None } } } /* external resources */ val PATH = "path" val Path = new Markup_String(PATH, NAME) val EXPORT_PATH = "export_path" val Export_Path = new Markup_String(EXPORT_PATH, NAME) val URL = "url" val Url = new Markup_String(URL, NAME) val DOC = "doc" val Doc = new Markup_String(DOC, NAME) /* pretty printing */ val Consistent = new Properties.Boolean("consistent") val Indent = new Properties.Int("indent") val Width = new Properties.Int("width") object Block { val name = "block" def apply(c: Boolean, i: Int): Markup = Markup(name, (if (c) Consistent(c) else Nil) ::: (if (i != 0) Indent(i) else Nil)) def unapply(markup: Markup): Option[(Boolean, Int)] = if (markup.name == name) { val c = Consistent.get(markup.properties) val i = Indent.get(markup.properties) Some((c, i)) } else None } object Break { val name = "break" def apply(w: Int, i: Int): Markup = Markup(name, (if (w != 0) Width(w) else Nil) ::: (if (i != 0) Indent(i) else Nil)) def unapply(markup: Markup): Option[(Int, Int)] = if (markup.name == name) { val w = Width.get(markup.properties) val i = Indent.get(markup.properties) Some((w, i)) } else None } val ITEM = "item" val BULLET = "bullet" val SEPARATOR = "separator" /* text properties */ val WORDS = "words" val HIDDEN = "hidden" val DELETE = "delete" /* misc entities */ val SESSION = "session" val THEORY = "theory" val CLASS = "class" val LOCALE = "locale" val BUNDLE = "bundle" val TYPE_NAME = "type_name" val CONSTANT = "constant" val AXIOM = "axiom" val FACT = "fact" val ORACLE = "oracle" val FIXED = "fixed" val CASE = "case" val DYNAMIC_FACT = "dynamic_fact" val LITERAL_FACT = "literal_fact" val ATTRIBUTE = "attribute" val METHOD = "method" val METHOD_MODIFIER = "method_modifier" /* inner syntax */ val TFREE = "tfree" val TVAR = 
"tvar" val FREE = "free" val SKOLEM = "skolem" val BOUND = "bound" val VAR = "var" val NUMERAL = "numeral" val LITERAL = "literal" val DELIMITER = "delimiter" val INNER_STRING = "inner_string" val INNER_CARTOUCHE = "inner_cartouche" val TOKEN_RANGE = "token_range" val SORTING = "sorting" val TYPING = "typing" val CLASS_PARAMETER = "class_parameter" /* antiquotations */ val ANTIQUOTED = "antiquoted" val ANTIQUOTE = "antiquote" val ML_ANTIQUOTATION = "ML_antiquotation" val DOCUMENT_ANTIQUOTATION = "document_antiquotation" val DOCUMENT_ANTIQUOTATION_OPTION = "document_antiquotation_option" /* document text */ val RAW_TEXT = "raw_text" val PLAIN_TEXT = "plain_text" val PARAGRAPH = "paragraph" val TEXT_FOLD = "text_fold" object Document_Tag extends Markup_String("document_tag", NAME) { val IMPORTANT = "important" val UNIMPORTANT = "unimportant" } /* LaTeX */ val Document_Latex = new Markup_Elem("document_latex") val Latex_Output = new Markup_Elem("latex_output") val Latex_Macro0 = new Markup_String("latex_macro0", NAME) val Latex_Macro = new Markup_String("latex_macro", NAME) val Latex_Environment = new Markup_String("latex_environment", NAME) val Latex_Heading = new Markup_String("latex_heading", KIND) val Latex_Body = new Markup_String("latex_body", KIND) val Latex_Delim = new Markup_String("latex_delim", NAME) val Latex_Tag = new Markup_String("latex_tag", NAME) val Latex_Index_Item = new Markup_Elem("latex_index_item") val Latex_Index_Entry = new Markup_String("latex_index_entry", KIND) val Optional_Argument = new Properties.String("optional_argument") /* Markdown document structure */ val MARKDOWN_PARAGRAPH = "markdown_paragraph" val MARKDOWN_ITEM = "markdown_item" val Markdown_Bullet = new Markup_Int("markdown_bullet", "depth") val Markdown_List = new Markup_String("markdown_list", "kind") val ITEMIZE = "itemize" val ENUMERATE = "enumerate" val DESCRIPTION = "description" /* ML */ val ML_KEYWORD1 = "ML_keyword1" val ML_KEYWORD2 = "ML_keyword2" val ML_KEYWORD3 = "ML_keyword3" val ML_DELIMITER = "ML_delimiter" val ML_TVAR = "ML_tvar" val ML_NUMERAL = "ML_numeral" val ML_CHAR = "ML_char" val ML_STRING = "ML_string" val ML_COMMENT = "ML_comment" val ML_DEF = "ML_def" val ML_OPEN = "ML_open" val ML_STRUCTURE = "ML_structure" val ML_TYPING = "ML_typing" val ML_BREAKPOINT = "ML_breakpoint" /* outer syntax */ val COMMAND_SPAN = "command_span" val Command_Span = new Markup_String(COMMAND_SPAN, NAME) val COMMAND = "command" val KEYWORD = "keyword" val KEYWORD1 = "keyword1" val KEYWORD2 = "keyword2" val KEYWORD3 = "keyword3" val QUASI_KEYWORD = "quasi_keyword" val IMPROPER = "improper" val OPERATOR = "operator" val STRING = "string" val ALT_STRING = "alt_string" - val VERBATIM = "verbatim" val CARTOUCHE = "cartouche" val COMMENT = "comment" val LOAD_COMMAND = "load_command" /* comments */ val COMMENT1 = "comment1" val COMMENT2 = "comment2" val COMMENT3 = "comment3" /* timing */ val Elapsed = new Properties.Double("elapsed") val CPU = new Properties.Double("cpu") val GC = new Properties.Double("gc") object Timing_Properties { def apply(timing: isabelle.Timing): Properties.T = Elapsed(timing.elapsed.seconds) ::: CPU(timing.cpu.seconds) ::: GC(timing.gc.seconds) def unapply(props: Properties.T): Option[isabelle.Timing] = (props, props, props) match { case (Elapsed(elapsed), CPU(cpu), GC(gc)) => Some(new isabelle.Timing(Time.seconds(elapsed), Time.seconds(cpu), Time.seconds(gc))) case _ => None } def get(props: Properties.T): isabelle.Timing = unapply(props).getOrElse(isabelle.Timing.zero) } val TIMING 
= "timing" object Timing { def apply(timing: isabelle.Timing): Markup = Markup(TIMING, Timing_Properties(timing)) def unapply(markup: Markup): Option[isabelle.Timing] = markup match { case Markup(TIMING, Timing_Properties(timing)) => Some(timing) case _ => None } } /* process result */ val Return_Code = new Properties.Int("return_code") object Process_Result { def apply(result: Process_Result): Properties.T = Return_Code(result.rc) ::: (if (result.timing.is_zero) Nil else Timing_Properties(result.timing)) def unapply(props: Properties.T): Option[Process_Result] = props match { case Return_Code(rc) => val timing = Timing_Properties.unapply(props).getOrElse(isabelle.Timing.zero) Some(isabelle.Process_Result(rc, timing = timing)) case _ => None } } /* command indentation */ val Command_Indent = new Markup_Int("command_indent", Indent.name) /* goals */ val GOAL = "goal" val SUBGOAL = "subgoal" /* command status */ val TASK = "task" val ACCEPTED = "accepted" val FORKED = "forked" val JOINED = "joined" val RUNNING = "running" val FINISHED = "finished" val FAILED = "failed" val CANCELED = "canceled" val INITIALIZED = "initialized" val FINALIZED = "finalized" val CONSOLIDATING = "consolidating" val CONSOLIDATED = "consolidated" /* interactive documents */ val VERSION = "version" val ASSIGN = "assign" /* prover process */ val PROVER_COMMAND = "prover_command" val PROVER_ARG = "prover_arg" /* messages */ val INIT = "init" val STATUS = "status" val REPORT = "report" val RESULT = "result" val WRITELN = "writeln" val STATE = "state" val INFORMATION = "information" val TRACING = "tracing" val WARNING = "warning" val LEGACY = "legacy" val ERROR = "error" val NODES_STATUS = "nodes_status" val PROTOCOL = "protocol" val SYSTEM = "system" val STDOUT = "stdout" val STDERR = "stderr" val EXIT = "exit" val WRITELN_MESSAGE = "writeln_message" val STATE_MESSAGE = "state_message" val INFORMATION_MESSAGE = "information_message" val TRACING_MESSAGE = "tracing_message" val WARNING_MESSAGE = "warning_message" val LEGACY_MESSAGE = "legacy_message" val ERROR_MESSAGE = "error_message" val messages = Map( WRITELN -> WRITELN_MESSAGE, STATE -> STATE_MESSAGE, INFORMATION -> INFORMATION_MESSAGE, TRACING -> TRACING_MESSAGE, WARNING -> WARNING_MESSAGE, LEGACY -> LEGACY_MESSAGE, ERROR -> ERROR_MESSAGE) val message: String => String = messages.withDefault((s: String) => s) val NO_REPORT = "no_report" val BAD = "bad" val INTENSIFY = "intensify" /* ML profiling */ val COUNT = "count" val ML_PROFILING_ENTRY = "ML_profiling_entry" val ML_PROFILING = "ML_profiling" object ML_Profiling { def unapply_entry(tree: XML.Tree): Option[isabelle.ML_Profiling.Entry] = tree match { case XML.Elem(Markup(ML_PROFILING_ENTRY, List((NAME, name), (COUNT, Value.Long(count)))), _) => Some(isabelle.ML_Profiling.Entry(name, count)) case _ => None } def unapply_report(tree: XML.Tree): Option[isabelle.ML_Profiling.Report] = tree match { case XML.Elem(Markup(ML_PROFILING, List((KIND, kind))), body) => Some(isabelle.ML_Profiling.Report(kind, body.flatMap(unapply_entry))) case _ => None } } /* active areas */ val BROWSER = "browser" val GRAPHVIEW = "graphview" val THEORY_EXPORTS = "theory_exports" val SENDBACK = "sendback" val PADDING = "padding" val PADDING_LINE = (PADDING, "line") val PADDING_COMMAND = (PADDING, "command") val DIALOG = "dialog" val Result = new Properties.String(RESULT) val JEDIT_ACTION = "jedit_action" /* protocol message functions */ val FUNCTION = "function" class Function(val name: String) { val PROPERTY: Properties.Entry = (FUNCTION, 
name) } class Properties_Function(name: String) extends Function(name) { def unapply(props: Properties.T): Option[Properties.T] = props match { case PROPERTY :: args => Some(args) case _ => None } } class Name_Function(name: String) extends Function(name) { def unapply(props: Properties.T): Option[String] = props match { case List(PROPERTY, (NAME, a)) => Some(a) case _ => None } } object ML_Statistics extends Function("ML_statistics") { def unapply(props: Properties.T): Option[(Long, String)] = props match { case List(PROPERTY, ("pid", Value.Long(pid)), ("stats_dir", stats_dir)) => Some((pid, stats_dir)) case _ => None } } val command_timing_properties: Set[String] = Set(FILE, OFFSET, NAME, Elapsed.name) def command_timing_property(entry: Properties.Entry): Boolean = command_timing_properties(entry._1) object Command_Timing extends Properties_Function("command_timing") object Theory_Timing extends Properties_Function("theory_timing") object Session_Timing extends Properties_Function("session_timing") { val Threads = new Properties.Int("threads") } object Task_Statistics extends Properties_Function("task_statistics") object Loading_Theory extends Properties_Function("loading_theory") object Build_Session_Finished extends Function("build_session_finished") object Commands_Accepted extends Function("commands_accepted") object Assign_Update extends Function("assign_update") object Removed_Versions extends Function("removed_versions") object Invoke_Scala extends Function("invoke_scala") { def unapply(props: Properties.T): Option[(String, String)] = props match { case List(PROPERTY, (NAME, name), (ID, id)) => Some((name, id)) case _ => None } } object Cancel_Scala extends Function("cancel_scala") { def unapply(props: Properties.T): Option[String] = props match { case List(PROPERTY, (ID, id)) => Some(id) case _ => None } } val PRINT_OPERATIONS = "print_operations" /* export */ val EXPORT = "export" val THEORY_NAME = "theory_name" val EXECUTABLE = "executable" val COMPRESS = "compress" val STRICT = "strict" /* debugger output */ val DEBUGGER_STATE = "debugger_state" object Debugger_State { def unapply(props: Properties.T): Option[String] = props match { case List((FUNCTION, DEBUGGER_STATE), (NAME, name)) => Some(name) case _ => None } } val DEBUGGER_OUTPUT = "debugger_output" object Debugger_Output { def unapply(props: Properties.T): Option[String] = props match { case List((FUNCTION, DEBUGGER_OUTPUT), (NAME, name)) => Some(name) case _ => None } } /* simplifier trace */ val SIMP_TRACE_PANEL = "simp_trace_panel" val SIMP_TRACE_LOG = "simp_trace_log" val SIMP_TRACE_STEP = "simp_trace_step" val SIMP_TRACE_RECURSE = "simp_trace_recurse" val SIMP_TRACE_HINT = "simp_trace_hint" val SIMP_TRACE_IGNORE = "simp_trace_ignore" val SIMP_TRACE_CANCEL = "simp_trace_cancel" object Simp_Trace_Cancel { def unapply(props: Properties.T): Option[Long] = props match { case (FUNCTION, SIMP_TRACE_CANCEL) :: Serial(i) => Some(i) case _ => None } } /* XML data representation */ def encode: XML.Encode.T[Markup] = (markup: Markup) => { import XML.Encode._ pair(string, properties)((markup.name, markup.properties)) } def decode: XML.Decode.T[Markup] = (body: XML.Body) => { import XML.Decode._ val (name, props) = pair(string, properties)(body) Markup(name, props) } } sealed case class Markup(name: String, properties: Properties.T) { def is_empty: Boolean = name.isEmpty def position_properties: Position.T = properties.filter(Markup.position_property) def markup(s: String): String = YXML.string_of_tree(XML.Elem(this, 
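Note: the Function classes above standardize protocol messages whose first property entry is ("function", name); Properties_Function exposes the remaining entries, Name_Function a single name. A dispatch sketch using functions defined in this block:

import isabelle._

object Protocol_Function_Example {
  def handle(props: Properties.T): String =
    props match {
      case Markup.Command_Timing(args) => "command_timing with " + args.length + " entries"
      case Markup.Invoke_Scala(name, id) => "invoke_scala " + name + "/" + id
      case _ => "unhandled"
    }

  def main(args: Array[String]): Unit = {
    val props = Markup.Command_Timing.PROPERTY :: Markup.Name("example")
    println(handle(props))  // command_timing with 1 entries
  }
}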
List(XML.Text(s)))) def update_properties(more_props: Properties.T): Markup = if (more_props.isEmpty) this else Markup(name, more_props.foldRight(properties) { case (p, ps) => Properties.put(ps, p) }) def + (entry: Properties.Entry): Markup = Markup(name, Properties.put(properties, entry)) } diff --git a/src/Pure/PIDE/rendering.scala b/src/Pure/PIDE/rendering.scala --- a/src/Pure/PIDE/rendering.scala +++ b/src/Pure/PIDE/rendering.scala @@ -1,792 +1,790 @@ /* Title: Pure/PIDE/rendering.scala Author: Makarius Isabelle-specific implementation of quasi-abstract rendering and markup interpretation. */ package isabelle import java.io.{File => JFile} import java.nio.file.FileSystems import scala.collection.immutable.SortedMap object Rendering { /* color */ object Color extends Enumeration { // background val unprocessed1, running1, canceled, bad, intensify, entity, active, active_result, markdown_bullet1, markdown_bullet2, markdown_bullet3, markdown_bullet4 = Value val background_colors: ValueSet = values // foreground val quoted, antiquoted = Value val foreground_colors: ValueSet = values -- background_colors // message underline val writeln, information, warning, legacy, error = Value val message_underline_colors: ValueSet = values -- background_colors -- foreground_colors // message background val writeln_message, information_message, tracing_message, warning_message, legacy_message, error_message = Value val message_background_colors: ValueSet = values -- background_colors -- foreground_colors -- message_underline_colors // text val main, keyword1, keyword2, keyword3, quasi_keyword, improper, operator, tfree, tvar, free, skolem, bound, `var`, inner_numeral, inner_quoted, inner_cartouche, comment1, comment2, comment3, dynamic, class_parameter, antiquote, raw_text, plain_text = Value val text_colors: ValueSet = values -- background_colors -- foreground_colors -- message_underline_colors -- message_background_colors // text overview val unprocessed, running = Value val text_overview_colors = Set(unprocessed, running, error, warning) } /* output messages */ val state_pri = 1 val writeln_pri = 2 val information_pri = 3 val tracing_pri = 4 val warning_pri = 5 val legacy_pri = 6 val error_pri = 7 val message_pri = Map( Markup.STATE -> state_pri, Markup.STATE_MESSAGE -> state_pri, Markup.WRITELN -> writeln_pri, Markup.WRITELN_MESSAGE -> writeln_pri, Markup.INFORMATION -> information_pri, Markup.INFORMATION_MESSAGE -> information_pri, Markup.TRACING -> tracing_pri, Markup.TRACING_MESSAGE -> tracing_pri, Markup.WARNING -> warning_pri, Markup.WARNING_MESSAGE -> warning_pri, Markup.LEGACY -> legacy_pri, Markup.LEGACY_MESSAGE -> legacy_pri, Markup.ERROR -> error_pri, Markup.ERROR_MESSAGE -> error_pri ).withDefaultValue(0) val message_underline_color = Map( writeln_pri -> Color.writeln, information_pri -> Color.information, warning_pri -> Color.warning, legacy_pri -> Color.legacy, error_pri -> Color.error) val message_background_color = Map( writeln_pri -> Color.writeln_message, information_pri -> Color.information_message, tracing_pri -> Color.tracing_message, warning_pri -> Color.warning_message, legacy_pri -> Color.legacy_message, error_pri -> Color.error_message) def output_messages(results: Command.Results): List[XML.Elem] = { val (states, other) = results.iterator.map(_._2).filterNot(Protocol.is_result).toList .partition(Protocol.is_state) states ::: other } /* text color */ val text_color = Map( Markup.KEYWORD1 -> Color.keyword1, Markup.KEYWORD2 -> Color.keyword2, Markup.KEYWORD3 -> Color.keyword3, 
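Note: the message priorities above impose a total order on overlapping message markups; underline and background colors are then selected from the maximal priority. A small sketch, same classpath assumption:

import isabelle._

object Message_Color_Example {
  def main(args: Array[String]): Unit = {
    val names = List(Markup.WRITELN, Markup.WARNING, Markup.TRACING)
    val pri = names.map(Rendering.message_pri).max  // warning_pri, the maximum here
    assert(Rendering.message_underline_color.get(pri) == Some(Rendering.Color.warning))
    // unknown markup names default to priority 0, which maps to no color
    assert(Rendering.message_underline_color.get(Rendering.message_pri("unknown")).isEmpty)
  }
}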
Markup.QUASI_KEYWORD -> Color.quasi_keyword, Markup.IMPROPER -> Color.improper, Markup.OPERATOR -> Color.operator, Markup.STRING -> Color.main, Markup.ALT_STRING -> Color.main, - Markup.VERBATIM -> Color.main, Markup.CARTOUCHE -> Color.main, Markup.LITERAL -> Color.keyword1, Markup.DELIMITER -> Color.main, Markup.TFREE -> Color.tfree, Markup.TVAR -> Color.tvar, Markup.FREE -> Color.free, Markup.SKOLEM -> Color.skolem, Markup.BOUND -> Color.bound, Markup.VAR -> Color.`var`, Markup.INNER_STRING -> Color.inner_quoted, Markup.INNER_CARTOUCHE -> Color.inner_cartouche, Markup.DYNAMIC_FACT -> Color.dynamic, Markup.CLASS_PARAMETER -> Color.class_parameter, Markup.ANTIQUOTE -> Color.antiquote, Markup.RAW_TEXT -> Color.raw_text, Markup.PLAIN_TEXT -> Color.plain_text, Markup.ML_KEYWORD1 -> Color.keyword1, Markup.ML_KEYWORD2 -> Color.keyword2, Markup.ML_KEYWORD3 -> Color.keyword3, Markup.ML_DELIMITER -> Color.main, Markup.ML_NUMERAL -> Color.inner_numeral, Markup.ML_CHAR -> Color.inner_quoted, Markup.ML_STRING -> Color.inner_quoted, Markup.ML_COMMENT -> Color.comment1, Markup.COMMENT -> Color.comment1, Markup.COMMENT1 -> Color.comment1, Markup.COMMENT2 -> Color.comment2, Markup.COMMENT3 -> Color.comment3) val foreground = Map( Markup.STRING -> Color.quoted, Markup.ALT_STRING -> Color.quoted, - Markup.VERBATIM -> Color.quoted, Markup.CARTOUCHE -> Color.quoted, Markup.ANTIQUOTED -> Color.antiquoted) /* tooltips */ val tooltip_descriptions = Map( Markup.CITATION -> "citation", Markup.TOKEN_RANGE -> "inner syntax token", Markup.FREE -> "free variable", Markup.SKOLEM -> "skolem variable", Markup.BOUND -> "bound variable", Markup.VAR -> "schematic variable", Markup.TFREE -> "free type variable", Markup.TVAR -> "schematic type variable") /* entity focus */ object Focus { def apply(ids: Set[Long]): Focus = new Focus(ids) val empty: Focus = apply(Set.empty) def make(args: List[Text.Info[Focus]]): Focus = args.foldLeft(empty) { case (focus1, Text.Info(_, focus2)) => focus1 ++ focus2 } val full: Focus = new Focus(Set.empty) { override def apply(id: Long): Boolean = true override def toString: String = "Focus.full" } } sealed class Focus private[Rendering](protected val rep: Set[Long]) { def defined: Boolean = rep.nonEmpty def apply(id: Long): Boolean = rep.contains(id) def + (id: Long): Focus = if (rep.contains(id)) this else new Focus(rep + id) def ++ (other: Focus): Focus = if (this eq other) this else if (rep.isEmpty) other else other.rep.iterator.foldLeft(this)(_ + _) override def toString: String = rep.mkString("Focus(", ",", ")") } /* markup elements */ val position_elements = Markup.Elements(Markup.BINDING, Markup.ENTITY, Markup.REPORT, Markup.POSITION) val semantic_completion_elements = Markup.Elements(Markup.COMPLETION, Markup.NO_COMPLETION) val language_context_elements = - Markup.Elements(Markup.STRING, Markup.ALT_STRING, Markup.VERBATIM, + Markup.Elements(Markup.STRING, Markup.ALT_STRING, Markup.CARTOUCHE, Markup.COMMENT, Markup.LANGUAGE, Markup.ML_STRING, Markup.ML_COMMENT) val language_elements = Markup.Elements(Markup.LANGUAGE) val citation_elements = Markup.Elements(Markup.CITATION) val active_elements = Markup.Elements(Markup.DIALOG, Markup.BROWSER, Markup.GRAPHVIEW, Markup.THEORY_EXPORTS, Markup.SENDBACK, Markup.JEDIT_ACTION, Markup.SIMP_TRACE_PANEL) val background_elements = Document_Status.Command_Status.proper_elements + Markup.WRITELN_MESSAGE + Markup.STATE_MESSAGE + Markup.INFORMATION_MESSAGE + Markup.TRACING_MESSAGE + Markup.WARNING_MESSAGE + Markup.LEGACY_MESSAGE + Markup.ERROR_MESSAGE + 
Markup.BAD + Markup.INTENSIFY + Markup.ENTITY + Markup.Markdown_Bullet.name ++ active_elements val foreground_elements = Markup.Elements(foreground.keySet) val text_color_elements = Markup.Elements(text_color.keySet) val tooltip_elements = Markup.Elements(Markup.LANGUAGE, Markup.EXPRESSION, Markup.TIMING, Markup.ENTITY, Markup.SORTING, Markup.TYPING, Markup.CLASS_PARAMETER, Markup.ML_TYPING, Markup.ML_BREAKPOINT, Markup.PATH, Markup.DOC, Markup.URL, Markup.MARKDOWN_PARAGRAPH, Markup.MARKDOWN_ITEM, Markup.Markdown_List.name) ++ Markup.Elements(tooltip_descriptions.keySet) val tooltip_message_elements = Markup.Elements(Markup.WRITELN, Markup.INFORMATION, Markup.WARNING, Markup.LEGACY, Markup.ERROR, Markup.BAD) val message_elements = Markup.Elements(message_pri.keySet) val warning_elements = Markup.Elements(Markup.WARNING, Markup.LEGACY) val error_elements = Markup.Elements(Markup.ERROR) val entity_elements = Markup.Elements(Markup.ENTITY) val antiquoted_elements = Markup.Elements(Markup.ANTIQUOTED) val meta_data_elements = Markup.Elements(Markup.META_TITLE, Markup.META_CREATOR, Markup.META_CONTRIBUTOR, Markup.META_DATE, Markup.META_DESCRIPTION, Markup.META_LICENSE) val document_tag_elements = Markup.Elements(Markup.Document_Tag.name) val markdown_elements = Markup.Elements(Markup.MARKDOWN_PARAGRAPH, Markup.MARKDOWN_ITEM, Markup.Markdown_List.name, Markup.Markdown_Bullet.name) } class Rendering( val snapshot: Document.Snapshot, val options: Options, val session: Session) { override def toString: String = "Rendering(" + snapshot.toString + ")" def get_text(range: Text.Range): Option[String] = None /* caret */ def before_caret_range(caret: Text.Offset): Text.Range = { val former_caret = snapshot.revert(caret) snapshot.convert(Text.Range(former_caret - 1, former_caret)) } /* completion */ def semantic_completion(completed_range: Option[Text.Range], caret_range: Text.Range) : Option[Text.Info[Completion.Semantic]] = if (snapshot.is_outdated) None else { snapshot.select(caret_range, Rendering.semantic_completion_elements, _ => { case Completion.Semantic.Info(info) => completed_range match { case Some(range0) if range0.contains(info.range) && range0 != info.range => None case _ => Some(info) } case _ => None }).headOption.map(_.info) } def semantic_completion_result( history: Completion.History, unicode: Boolean, completed_range: Option[Text.Range], caret_range: Text.Range): (Boolean, Option[Completion.Result]) = { semantic_completion(completed_range, caret_range) match { case Some(Text.Info(_, Completion.No_Completion)) => (true, None) case Some(Text.Info(range, names: Completion.Names)) => get_text(range) match { case Some(original) => (false, names.complete(range, history, unicode, original)) case None => (false, None) } case None => (false, None) } } def language_context(range: Text.Range): Option[Completion.Language_Context] = snapshot.select(range, Rendering.language_context_elements, _ => { case Text.Info(_, XML.Elem(Markup.Language(language, symbols, antiquotes, delimited), _)) => if (delimited) Some(Completion.Language_Context(language, symbols, antiquotes)) else None case Text.Info(_, elem) if elem.name == Markup.ML_STRING || elem.name == Markup.ML_COMMENT => Some(Completion.Language_Context.ML_inner) case Text.Info(_, _) => Some(Completion.Language_Context.inner) }).headOption.map(_.info) def citations(range: Text.Range): List[Text.Info[String]] = snapshot.select(range, Rendering.citation_elements, _ => { case Text.Info(info_range, XML.Elem(Markup.Citation(name), _)) => 
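Note: the element filters above are built compositionally: + adds a single element name and ++ forms a union. A sketch of a custom filter combining literal names with a predefined filter, same classpath assumption:

import isabelle._

object Elements_Example {
  def main(args: Array[String]): Unit = {
    val my_elements =
      Markup.Elements(Markup.WARNING, Markup.ERROR) + Markup.ENTITY ++
        Rendering.active_elements
    assert(my_elements(Markup.SENDBACK))  // contributed by active_elements
    assert(my_elements(Markup.ENTITY))
    assert(!my_elements(Markup.STRING))   // not included anywhere above
  }
}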
Some(Text.Info(snapshot.convert(info_range), name)) case _ => None }).map(_.info) /* file-system path completion */ def language_path(range: Text.Range): Option[Text.Info[Boolean]] = snapshot.select(range, Rendering.language_elements, _ => { case Text.Info(info_range, XML.Elem(Markup.Language.Path(delimited), _)) => Some((delimited, snapshot.convert(info_range))) case _ => None }).headOption.map({ case Text.Info(_, (delimited, range)) => Text.Info(range, delimited) }) def path_completion(caret: Text.Offset): Option[Completion.Result] = { def complete(text: String): List[(String, List[String])] = { try { val path = Path.explode(text) val (dir, base_name) = if (text == "" || text.endsWith("/")) (path, "") else (path.dir, path.file_name) val directory = new JFile(session.resources.append(snapshot.node_name, dir)) val files = directory.listFiles if (files == null) Nil else { val ignore = space_explode(':', options.string("completion_path_ignore")). map(s => FileSystems.getDefault.getPathMatcher("glob:" + s)) (for { file <- files.iterator name = file.getName if name.startsWith(base_name) path_name = new JFile(name).toPath if !ignore.exists(matcher => matcher.matches(path_name)) text1 = (dir + Path.basic(name)).implode_short if text != text1 is_dir = new JFile(directory, name).isDirectory replacement = text1 + (if (is_dir) "/" else "") descr = List(text1, if (is_dir) "(directory)" else "(file)") } yield (replacement, descr)).take(options.int("completion_limit")).toList } } catch { case ERROR(_) => Nil } } def is_wrapped(s: String): Boolean = s.startsWith("\"") && s.endsWith("\"") || s.startsWith(Symbol.open_decoded) && s.endsWith(Symbol.close_decoded) for { Text.Info(r1, delimited) <- language_path(before_caret_range(caret)) s1 <- get_text(r1) (r2, s2) <- if (is_wrapped(s1)) { Some((Text.Range(r1.start + 1, r1.stop - 1), s1.substring(1, s1.length - 1))) } else if (delimited) Some((r1, s1)) else None if Path.is_valid(s2) paths = complete(s2) if paths.nonEmpty items = paths.map(p => Completion.Item(r2, s2, "", p._2, p._1, 0, false)) } yield Completion.Result(r2, s2, false, items) } /* spell checker */ private lazy val spell_checker_include = Markup.Elements(space_explode(',', options.string("spell_checker_include")): _*) private lazy val spell_checker_elements = spell_checker_include ++ Markup.Elements(space_explode(',', options.string("spell_checker_exclude")): _*) def spell_checker(range: Text.Range): List[Text.Info[Text.Range]] = { val result = snapshot.select(range, spell_checker_elements, _ => { case info => Some( if (spell_checker_include(info.info.name)) Some(snapshot.convert(info.range)) else None) }) for (Text.Info(range, Some(range1)) <- result) yield Text.Info(range, range1) } def spell_checker_point(range: Text.Range): Option[Text.Range] = spell_checker(range).headOption.map(_.info) /* text background */ def background(elements: Markup.Elements, range: Text.Range, focus: Rendering.Focus) : List[Text.Info[Rendering.Color.Value]] = { for { Text.Info(r, result) <- snapshot.cumulate[(List[Markup], Option[Rendering.Color.Value])]( range, (List(Markup.Empty), None), elements, command_states => { case ((markups, color), Text.Info(_, XML.Elem(markup, _))) if markups.nonEmpty && Document_Status.Command_Status.proper_elements(markup.name) => Some((markup :: markups, color)) case (_, Text.Info(_, XML.Elem(Markup(Markup.BAD, _), _))) => Some((Nil, Some(Rendering.Color.bad))) case (_, Text.Info(_, XML.Elem(Markup(Markup.INTENSIFY, _), _))) => Some((Nil, Some(Rendering.Color.intensify))) case (_, 
Text.Info(_, XML.Elem(Markup.Entity.Occ(i), _))) if focus(i) => Some((Nil, Some(Rendering.Color.entity))) case (_, Text.Info(_, XML.Elem(Markup.Markdown_Bullet(depth), _))) => val color = depth % 4 match { case 1 => Rendering.Color.markdown_bullet1 case 2 => Rendering.Color.markdown_bullet2 case 3 => Rendering.Color.markdown_bullet3 case _ => Rendering.Color.markdown_bullet4 } Some((Nil, Some(color))) case (_, Text.Info(_, Protocol.Dialog(_, serial, result))) => command_states.collectFirst( { case st if st.results.defined(serial) => st.results.get(serial).get }) match { case Some(Protocol.Dialog_Result(res)) if res == result => Some((Nil, Some(Rendering.Color.active_result))) case _ => Some((Nil, Some(Rendering.Color.active))) } case (_, Text.Info(_, elem)) => if (Rendering.active_elements(elem.name)) Some((Nil, Some(Rendering.Color.active))) else None }) color <- result match { case (markups, opt_color) if markups.nonEmpty => val status = Document_Status.Command_Status.make(markups.iterator) if (status.is_unprocessed) Some(Rendering.Color.unprocessed1) else if (status.is_running) Some(Rendering.Color.running1) else if (status.is_canceled) Some(Rendering.Color.canceled) else opt_color case (_, opt_color) => opt_color } } yield Text.Info(r, color) } /* text foreground */ def foreground(range: Text.Range): List[Text.Info[Rendering.Color.Value]] = snapshot.select(range, Rendering.foreground_elements, _ => { case info => Some(Rendering.foreground(info.info.name)) }) /* entity focus */ def entity_focus_defs(range: Text.Range): Rendering.Focus = Rendering.Focus.make( snapshot.cumulate(range, Rendering.Focus.empty, Rendering.entity_elements, _ => { case (focus, Text.Info(_, XML.Elem(Markup.Entity.Def(i), _))) => Some(focus + i) case _ => None })) def entity_focus(range: Text.Range, defs_focus: Rendering.Focus): Rendering.Focus = Rendering.Focus.make( snapshot.cumulate(range, Rendering.Focus.empty, Rendering.entity_elements, _ => { case (focus, Text.Info(_, XML.Elem(Markup.Entity.Ref(i), _))) if defs_focus(i) => Some(focus + i) case _ => None })) /* caret focus */ def caret_focus(caret_range: Text.Range, defs_range: Text.Range): Rendering.Focus = { val focus = entity_focus_defs(caret_range) if (focus.defined) focus else if (defs_range == Text.Range.offside) Rendering.Focus.empty else { val defs_focus = if (defs_range == Text.Range.full) Rendering.Focus.full else entity_focus_defs(defs_range) entity_focus(caret_range, defs_focus) } } def caret_focus_ranges(caret_range: Text.Range, defs_range: Text.Range): List[Text.Range] = { val focus = caret_focus(caret_range, defs_range) if (focus.defined) { snapshot.cumulate[Boolean](defs_range, false, Rendering.entity_elements, _ => { case (_, Text.Info(_, XML.Elem(Markup.Entity.Occ(i), _))) if focus(i) => Some(true) case _ => None }).flatMap(info => if (info.info) Some(info.range) else None) } else Nil } /* messages */ def message_underline_color(elements: Markup.Elements, range: Text.Range) : List[Text.Info[Rendering.Color.Value]] = { val results = snapshot.cumulate[Int](range, 0, elements, _ => { case (pri, Text.Info(_, elem)) => Some(pri max Rendering.message_pri(elem.name)) }) for { Text.Info(r, pri) <- results color <- Rendering.message_underline_color.get(pri) } yield Text.Info(r, color) } def text_messages(range: Text.Range): List[Text.Info[XML.Elem]] = { val results = snapshot.cumulate[Vector[Command.Results.Entry]]( range, Vector.empty, Rendering.message_elements, command_states => { case (res, Text.Info(_, elem)) => 
Command.State.get_result_proper(command_states, elem.markup.properties) .map(res :+ _) case _ => None }) var seen_serials = Set.empty[Long] def seen(i: Long): Boolean = { val b = seen_serials(i) seen_serials += i b } for { Text.Info(range, entries) <- results (i, elem) <- entries if !seen(i) } yield Text.Info(range, elem) } /* tooltips */ def timing_threshold: Double = 0.0 private sealed case class Tooltip_Info( range: Text.Range, timing: Timing = Timing.zero, messages: List[(Long, XML.Tree)] = Nil, rev_infos: List[(Boolean, XML.Tree)] = Nil) { def + (t: Timing): Tooltip_Info = copy(timing = timing + t) def + (r0: Text.Range, serial: Long, tree: XML.Tree): Tooltip_Info = { val r = snapshot.convert(r0) if (range == r) copy(messages = (serial -> tree) :: messages) else copy(range = r, messages = List(serial -> tree)) } def + (r0: Text.Range, important: Boolean, tree: XML.Tree): Tooltip_Info = { val r = snapshot.convert(r0) if (range == r) copy(rev_infos = (important -> tree) :: rev_infos) else copy (range = r, rev_infos = List(important -> tree)) } def timing_info(tree: XML.Tree): Option[XML.Tree] = tree match { case XML.Elem(Markup(Markup.TIMING, _), _) => if (timing.elapsed.seconds >= timing_threshold) Some(XML.Text(timing.message)) else None case _ => Some(tree) } def infos(important: Boolean): List[XML.Tree] = for { (is_important, tree) <- rev_infos.reverse if is_important == important tree1 <- timing_info(tree) } yield tree1 } def perhaps_append_file(node_name: Document.Node.Name, name: String): String = if (Path.is_valid(name)) session.resources.append(node_name, Path.explode(name)) else name def tooltips(elements: Markup.Elements, range: Text.Range): Option[Text.Info[List[XML.Tree]]] = { val results = snapshot.cumulate[Tooltip_Info](range, Tooltip_Info(range), elements, command_states => { case (info, Text.Info(_, XML.Elem(Markup.Timing(t), _))) => Some(info + t) case (info, Text.Info(r0, msg @ XML.Elem(Markup(Markup.BAD, Markup.Serial(i)), body))) if body.nonEmpty => Some(info + (r0, i, msg)) case (info, Text.Info(r0, XML.Elem(Markup(name, props), _))) if Rendering.tooltip_message_elements(name) => for ((i, tree) <- Command.State.get_result_proper(command_states, props)) yield (info + (r0, i, tree)) case (info, Text.Info(r0, XML.Elem(Markup.Entity(kind, name), _))) if kind != "" && kind != Markup.ML_DEF => val kind1 = Word.implode(Word.explode('_', kind)) val txt1 = if (name == "") kind1 else if (kind1 == "") quote(name) else kind1 + " " + quote(name) val info1 = info + (r0, true, XML.Text(txt1)) Some(if (kind == Markup.COMMAND) info1 + (r0, true, XML.elem(Markup.TIMING)) else info1) case (info, Text.Info(r0, XML.Elem(Markup.Path(name), _))) => val file = perhaps_append_file(snapshot.node_name, name) val text = if (name == file) "file " + quote(file) else "path " + quote(name) + "\nfile " + quote(file) Some(info + (r0, true, XML.Text(text))) case (info, Text.Info(r0, XML.Elem(Markup.Doc(name), _))) => val text = "doc " + quote(name) Some(info + (r0, true, XML.Text(text))) case (info, Text.Info(r0, XML.Elem(Markup.Url(name), _))) => Some(info + (r0, true, XML.Text("URL " + quote(name)))) case (info, Text.Info(r0, XML.Elem(Markup(name, _), body))) if name == Markup.SORTING || name == Markup.TYPING => Some(info + (r0, true, Pretty.block(XML.Text("::") :: Pretty.brk(1) :: body))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.CLASS_PARAMETER, _), body))) => Some(info + (r0, true, Pretty.block(0, body))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.ML_TYPING, _), body))) => 
Some(info + (r0, false, Pretty.block(XML.Text("ML:") :: Pretty.brk(1) :: body))) case (info, Text.Info(r0, Protocol.ML_Breakpoint(breakpoint))) => val text = if (session.debugger.breakpoint_state(breakpoint)) "breakpoint (enabled)" else "breakpoint (disabled)" Some(info + (r0, true, XML.Text(text))) case (info, Text.Info(r0, XML.Elem(Markup.Language(language, _, _, _), _))) => val lang = Word.implode(Word.explode('_', language)) Some(info + (r0, true, XML.Text("language: " + lang))) case (info, Text.Info(r0, XML.Elem(Markup.Expression(kind), _))) => val descr = if (kind == "") "expression" else "expression: " + kind Some(info + (r0, true, XML.Text(descr))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.MARKDOWN_PARAGRAPH, _), _))) => Some(info + (r0, true, XML.Text("Markdown: paragraph"))) case (info, Text.Info(r0, XML.Elem(Markup(Markup.MARKDOWN_ITEM, _), _))) => Some(info + (r0, true, XML.Text("Markdown: item"))) case (info, Text.Info(r0, XML.Elem(Markup.Markdown_List(kind), _))) => Some(info + (r0, true, XML.Text("Markdown: " + kind))) case (info, Text.Info(r0, XML.Elem(Markup(name, _), _))) => Rendering.tooltip_descriptions.get(name).map(desc => info + (r0, true, XML.Text(desc))) }).map(_.info) if (results.isEmpty) None else { val r = Text.Range(results.head.range.start, results.last.range.stop) val all_tips = results.flatMap(_.messages).foldLeft(SortedMap.empty[Long, XML.Tree])(_ + _) .iterator.map(_._2).toList ::: results.flatMap(res => res.infos(true)) ::: results.flatMap(res => res.infos(false)).lastOption.toList if (all_tips.isEmpty) None else Some(Text.Info(r, all_tips)) } } /* messages */ def warnings(range: Text.Range): List[Text.Markup] = snapshot.select(range, Rendering.warning_elements, _ => Some(_)).map(_.info) def errors(range: Text.Range): List[Text.Markup] = snapshot.select(range, Rendering.error_elements, _ => Some(_)).map(_.info) /* command status overview */ def overview_color(range: Text.Range): Option[Rendering.Color.Value] = { if (snapshot.is_outdated) None else { val results = snapshot.cumulate[List[Markup]](range, Nil, Document_Status.Command_Status.liberal_elements, _ => { case (status, Text.Info(_, elem)) => Some(elem.markup :: status) }, status = true) if (results.isEmpty) None else { val status = Document_Status.Command_Status.make(results.iterator.flatMap(_.info)) if (status.is_running) Some(Rendering.Color.running) else if (status.is_failed) Some(Rendering.Color.error) else if (status.is_warned) Some(Rendering.Color.warning) else if (status.is_unprocessed) Some(Rendering.Color.unprocessed) else None } } } /* antiquoted text */ def antiquoted(range: Text.Range): Option[Text.Range] = snapshot.cumulate[Option[Text.Range]](range, None, Rendering.antiquoted_elements, _ => { case (res, info) => if (res.isEmpty) Some(Some(info.range)) else None }).headOption.flatMap(_.info) /* meta data */ def meta_data(range: Text.Range): Properties.T = { val results = snapshot.cumulate[Properties.T](range, Nil, Rendering.meta_data_elements, _ => { case (res, Text.Info(_, elem)) => val plain_text = XML.content(elem) Some((elem.name -> plain_text) :: res) }) Library.distinct(results.flatMap(_.info.reverse)) } /* document tags */ def document_tags(range: Text.Range): List[String] = { val results = snapshot.cumulate[List[String]](range, Nil, Rendering.document_tag_elements, _ => { case (res, Text.Info(_, XML.Elem(Markup.Document_Tag(name), _))) => Some(name :: res) case _ => None }) Library.distinct(results.flatMap(_.info.reverse)) } } diff --git a/src/Pure/Pure.thy 
b/src/Pure/Pure.thy --- a/src/Pure/Pure.thy +++ b/src/Pure/Pure.thy @@ -1,1535 +1,1535 @@ (* Title: Pure/Pure.thy Author: Makarius The Pure theory, with definitions of Isar commands and some lemmas. *) theory Pure keywords "!!" "!" "+" ":" ";" "<" "<=" "==" "=>" "?" "[" "\" "\" "\" "\" "\" "\" "]" "binder" "by" "in" "infix" "infixl" "infixr" "is" "open" "output" "overloaded" "pervasive" "premises" "structure" "unchecked" and "private" "qualified" :: before_command and "assumes" "constrains" "defines" "fixes" "for" "if" "includes" "notes" "rewrites" "obtains" "shows" "when" "where" "|" :: quasi_command and "text" "txt" :: document_body and "text_raw" :: document_raw and "default_sort" :: thy_decl and "typedecl" "nonterminal" "judgment" "consts" "syntax" "no_syntax" "translations" "no_translations" "type_notation" "no_type_notation" "notation" "no_notation" "alias" "type_alias" "declare" "hide_class" "hide_type" "hide_const" "hide_fact" :: thy_decl and "type_synonym" "definition" "abbreviation" "lemmas" :: thy_defn and "axiomatization" :: thy_stmt and "external_file" "bibtex_file" "ROOTS_file" :: thy_load and "generate_file" :: thy_decl and "export_generated_files" :: diag and "compile_generated_files" :: diag and "external_files" "export_files" "export_prefix" and "ML_file" "ML_file_debug" "ML_file_no_debug" :: thy_load % "ML" and "SML_file" "SML_file_debug" "SML_file_no_debug" :: thy_load % "ML" and "SML_import" "SML_export" "ML_export" :: thy_decl % "ML" and "ML_prf" :: prf_decl % "proof" (* FIXME % "ML" ?? *) and "ML_val" "ML_command" :: diag % "ML" and "simproc_setup" :: thy_decl % "ML" and "setup" "local_setup" "attribute_setup" "method_setup" "declaration" "syntax_declaration" "parse_ast_translation" "parse_translation" "print_translation" "typed_print_translation" "print_ast_translation" "oracle" :: thy_decl % "ML" and "bundle" :: thy_decl_block and "unbundle" :: thy_decl and "include" "including" :: prf_decl and "print_bundles" :: diag and "context" "locale" "experiment" :: thy_decl_block and "interpret" :: prf_goal % "proof" and "interpretation" "global_interpretation" "sublocale" :: thy_goal and "class" :: thy_decl_block and "subclass" :: thy_goal and "instantiation" :: thy_decl_block and "instance" :: thy_goal and "overloading" :: thy_decl_block and "opening" :: quasi_command and "code_datatype" :: thy_decl and "theorem" "lemma" "corollary" "proposition" :: thy_goal_stmt and "schematic_goal" :: thy_goal_stmt and "notepad" :: thy_decl_block and "have" :: prf_goal % "proof" and "hence" :: prf_goal % "proof" and "show" :: prf_asm_goal % "proof" and "thus" :: prf_asm_goal % "proof" and "then" "from" "with" :: prf_chain % "proof" and "note" :: prf_decl % "proof" and "supply" :: prf_script % "proof" and "using" "unfolding" :: prf_decl % "proof" and "fix" "assume" "presume" "define" :: prf_asm % "proof" and "consider" :: prf_goal % "proof" and "obtain" :: prf_asm_goal % "proof" and "let" "write" :: prf_decl % "proof" and "case" :: prf_asm % "proof" and "{" :: prf_open % "proof" and "}" :: prf_close % "proof" and "next" :: next_block % "proof" and "qed" :: qed_block % "proof" and "by" ".." "." 
"sorry" "\" :: "qed" % "proof" and "done" :: "qed_script" % "proof" and "oops" :: qed_global % "proof" and "defer" "prefer" "apply" :: prf_script % "proof" and "apply_end" :: prf_script % "proof" and "subgoal" :: prf_script_goal % "proof" and "proof" :: prf_block % "proof" and "also" "moreover" :: prf_decl % "proof" and "finally" "ultimately" :: prf_chain % "proof" and "back" :: prf_script % "proof" and "help" "print_commands" "print_options" "print_context" "print_theory" "print_definitions" "print_syntax" "print_abbrevs" "print_defn_rules" "print_theorems" "print_locales" "print_classes" "print_locale" "print_interps" "print_attributes" "print_simpset" "print_rules" "print_trans_rules" "print_methods" "print_antiquotations" "print_ML_antiquotations" "thy_deps" "locale_deps" "class_deps" "thm_deps" "thm_oracles" "print_term_bindings" "print_facts" "print_cases" "print_statement" "thm" "prf" "full_prf" "prop" "term" "typ" "print_codesetup" "unused_thms" :: diag and "print_state" :: diag and "welcome" :: diag and "end" :: thy_end and "realizers" :: thy_decl and "realizability" :: thy_decl and "extract_type" "extract" :: thy_decl and "find_theorems" "find_consts" :: diag and "named_theorems" :: thy_decl abbrevs "\\tag" = "\<^marker>\tag \" and "===>" = "===>" (*prevent replacement of very long arrows*) and "--->" = "\\" and "hence" "thus" "default_sort" "simproc_setup" "apply_end" "realizers" "realizability" = "" and "hence" = "then have" and "thus" = "then show" begin section \Isar commands\ subsection \Other files\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\external_file\ "formal dependency on external file" (Resources.provide_parse_file >> (fn get_file => Toplevel.theory (#2 o get_file))); val _ = Outer_Syntax.command \<^command_keyword>\bibtex_file\ "check bibtex database file in Prover IDE" (Resources.provide_parse_file >> (fn get_file => Toplevel.theory (fn thy => let val ({lines, pos, ...}, thy') = get_file thy; val _ = Bibtex.check_database_output pos (cat_lines lines); in thy' end))); val _ = Outer_Syntax.command \<^command_keyword>\ROOTS_file\ "session ROOTS file" (Resources.provide_parse_file >> (fn get_file => Toplevel.theory (fn thy => let val ({src_path, lines, pos = pos0, ...}, thy') = get_file thy; val ctxt = Proof_Context.init_global thy'; val dir = Path.dir (Path.expand (Resources.master_directory thy' + src_path)); val _ = (lines, pos0) |-> fold (fn line => fn pos1 => let val pos2 = Position.symbol_explode line pos1; val range = Position.range (pos1, pos2); val source = Input.source true line range; val _ = if line = "" then () else if String.isPrefix "#" line then Context_Position.report ctxt (#1 range) Markup.comment else (ignore (Resources.check_session_dir ctxt (SOME dir) source) handle ERROR msg => Output.error_message msg); in pos2 |> Position.symbol "\n" end); in thy' end))); val _ = Outer_Syntax.local_theory \<^command_keyword>\generate_file\ "generate source file, with antiquotations" (Parse.path_binding -- (\<^keyword>\=\ |-- Parse.embedded_input) >> Generated_Files.generate_file_cmd); val files_in_theory = (Parse.underscore >> K [] || Scan.repeat1 Parse.path_binding) -- Scan.option (\<^keyword>\(\ |-- Parse.!!! 
(\<^keyword>\in\ |-- Parse.theory_name --| \<^keyword>\)\)); val _ = Outer_Syntax.command \<^command_keyword>\export_generated_files\ "export generated files from given theories" (Parse.and_list1 files_in_theory >> (fn args => Toplevel.keep (fn st => Generated_Files.export_generated_files_cmd (Toplevel.context_of st) args))); val base_dir = Scan.optional (\<^keyword>\(\ |-- Parse.!!! (\<^keyword>\in\ |-- Parse.path_input --| \<^keyword>\)\)) (Input.string ""); val external_files = Scan.repeat1 Parse.path_input -- base_dir; val exe = Parse.reserved "exe" >> K true || Parse.reserved "executable" >> K false; val executable = \<^keyword>\(\ |-- Parse.!!! (exe --| \<^keyword>\)\) >> SOME || Scan.succeed NONE; val export_files = Scan.repeat1 Parse.path_binding -- executable; val _ = Outer_Syntax.command \<^command_keyword>\compile_generated_files\ "compile generated files and export results" (Parse.and_list files_in_theory -- Scan.optional (\<^keyword>\external_files\ |-- Parse.!!! (Parse.and_list1 external_files)) [] -- Scan.optional (\<^keyword>\export_files\ |-- Parse.!!! (Parse.and_list1 export_files)) [] -- Scan.optional (\<^keyword>\export_prefix\ |-- Parse.path_binding) ("", Position.none) -- (Parse.where_ |-- Parse.!!! Parse.ML_source) >> (fn ((((args, external), export), export_prefix), source) => Toplevel.keep (fn st => Generated_Files.compile_generated_files_cmd (Toplevel.context_of st) args external export export_prefix source))); in end\ external_file "ROOT0.ML" external_file "ROOT.ML" subsection \Embedded ML text\ ML \ local val semi = Scan.option \<^keyword>\;\; val _ = Outer_Syntax.command \<^command_keyword>\ML_file\ "read and evaluate Isabelle/ML file" (Resources.parse_file --| semi >> ML_File.ML NONE); val _ = Outer_Syntax.command \<^command_keyword>\ML_file_debug\ "read and evaluate Isabelle/ML file (with debugger information)" (Resources.parse_file --| semi >> ML_File.ML (SOME true)); val _ = Outer_Syntax.command \<^command_keyword>\ML_file_no_debug\ "read and evaluate Isabelle/ML file (no debugger information)" (Resources.parse_file --| semi >> ML_File.ML (SOME false)); val _ = Outer_Syntax.command \<^command_keyword>\SML_file\ "read and evaluate Standard ML file" (Resources.parse_file --| semi >> ML_File.SML NONE); val _ = Outer_Syntax.command \<^command_keyword>\SML_file_debug\ "read and evaluate Standard ML file (with debugger information)" (Resources.parse_file --| semi >> ML_File.SML (SOME true)); val _ = Outer_Syntax.command \<^command_keyword>\SML_file_no_debug\ "read and evaluate Standard ML file (no debugger information)" (Resources.parse_file --| semi >> ML_File.SML (SOME false)); val _ = Outer_Syntax.command \<^command_keyword>\SML_export\ "evaluate SML within Isabelle/ML environment" (Parse.ML_source >> (fn source => let val flags: ML_Compiler.flags = {environment = ML_Env.SML_export, redirect = false, verbose = true, debug = NONE, writeln = writeln, warning = warning}; in Toplevel.theory (Context.theory_map (ML_Context.exec (fn () => ML_Context.eval_source flags source))) end)); val _ = Outer_Syntax.command \<^command_keyword>\SML_import\ "evaluate Isabelle/ML within SML environment" (Parse.ML_source >> (fn source => let val flags: ML_Compiler.flags = {environment = ML_Env.SML_import, redirect = false, verbose = true, debug = NONE, writeln = writeln, warning = warning}; in Toplevel.generic_theory (ML_Context.exec (fn () => ML_Context.eval_source flags source) #> Local_Theory.propagate_ml_env) end)); val _ = Outer_Syntax.command ("ML_export", \<^here>) "ML text 
within theory or local theory, and export to bootstrap environment" (Parse.ML_source >> (fn source => Toplevel.generic_theory (fn context => context |> Config.put_generic ML_Env.ML_environment ML_Env.Isabelle |> Config.put_generic ML_Env.ML_write_global true |> ML_Context.exec (fn () => ML_Context.eval_source (ML_Compiler.verbose true ML_Compiler.flags) source) |> Config.restore_generic ML_Env.ML_write_global context |> Config.restore_generic ML_Env.ML_environment context |> Local_Theory.propagate_ml_env))); val _ = Outer_Syntax.command \<^command_keyword>\ML_prf\ "ML text within proof" (Parse.ML_source >> (fn source => Toplevel.proof (Proof.map_context (Context.proof_map (ML_Context.exec (fn () => ML_Context.eval_source (ML_Compiler.verbose true ML_Compiler.flags) source))) #> Proof.propagate_ml_env))); val _ = Outer_Syntax.command \<^command_keyword>\ML_val\ "diagnostic ML text" (Parse.ML_source >> Isar_Cmd.ml_diag true); val _ = Outer_Syntax.command \<^command_keyword>\ML_command\ "diagnostic ML text (silent)" (Parse.ML_source >> Isar_Cmd.ml_diag false); val _ = Outer_Syntax.command \<^command_keyword>\setup\ "ML setup for global theory" (Parse.ML_source >> (Toplevel.theory o Isar_Cmd.setup)); val _ = Outer_Syntax.local_theory \<^command_keyword>\local_setup\ "ML setup for local theory" (Parse.ML_source >> Isar_Cmd.local_setup); val _ = Outer_Syntax.command \<^command_keyword>\oracle\ "declare oracle" (Parse.range Parse.name -- Parse.!!! (\<^keyword>\=\ |-- Parse.ML_source) >> (fn (x, y) => Toplevel.theory (Isar_Cmd.oracle x y))); val _ = Outer_Syntax.local_theory \<^command_keyword>\attribute_setup\ "define attribute in ML" (Parse.name_position -- - Parse.!!! (\<^keyword>\=\ |-- Parse.ML_source -- Scan.optional Parse.text "") + Parse.!!! (\<^keyword>\=\ |-- Parse.ML_source -- Scan.optional Parse.embedded "") >> (fn (name, (txt, cmt)) => Attrib.attribute_setup name txt cmt)); val _ = Outer_Syntax.local_theory \<^command_keyword>\method_setup\ "define proof method in ML" (Parse.name_position -- - Parse.!!! (\<^keyword>\=\ |-- Parse.ML_source -- Scan.optional Parse.text "") + Parse.!!! 
(\<^keyword>\=\ |-- Parse.ML_source -- Scan.optional Parse.embedded "") >> (fn (name, (txt, cmt)) => Method.method_setup name txt cmt)); val _ = Outer_Syntax.local_theory \<^command_keyword>\declaration\ "generic ML declaration" (Parse.opt_keyword "pervasive" -- Parse.ML_source >> (fn (pervasive, txt) => Isar_Cmd.declaration {syntax = false, pervasive = pervasive} txt)); val _ = Outer_Syntax.local_theory \<^command_keyword>\syntax_declaration\ "generic ML syntax declaration" (Parse.opt_keyword "pervasive" -- Parse.ML_source >> (fn (pervasive, txt) => Isar_Cmd.declaration {syntax = true, pervasive = pervasive} txt)); val _ = Outer_Syntax.local_theory \<^command_keyword>\simproc_setup\ "define simproc in ML" (Parse.name_position -- (\<^keyword>\(\ |-- Parse.enum1 "|" Parse.term --| \<^keyword>\)\ --| \<^keyword>\=\) -- Parse.ML_source >> (fn ((a, b), c) => Isar_Cmd.simproc_setup a b c)); in end\ subsection \Theory commands\ subsubsection \Sorts and types\ ML \ local val _ = Outer_Syntax.local_theory \<^command_keyword>\default_sort\ "declare default sort for explicit type variables" (Parse.sort >> (fn s => fn lthy => Local_Theory.set_defsort (Syntax.read_sort lthy s) lthy)); val _ = Outer_Syntax.local_theory \<^command_keyword>\typedecl\ "type declaration" (Parse.type_args -- Parse.binding -- Parse.opt_mixfix >> (fn ((args, a), mx) => Typedecl.typedecl {final = true} (a, map (rpair dummyS) args, mx) #> snd)); val _ = Outer_Syntax.local_theory \<^command_keyword>\type_synonym\ "declare type abbreviation" (Parse.type_args -- Parse.binding -- (\<^keyword>\=\ |-- Parse.!!! (Parse.typ -- Parse.opt_mixfix')) >> (fn ((args, a), (rhs, mx)) => snd o Typedecl.abbrev_cmd (a, args, mx) rhs)); in end\ subsubsection \Consts\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\judgment\ "declare object-logic judgment" (Parse.const_binding >> (Toplevel.theory o Object_Logic.add_judgment_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\consts\ "declare constants" (Scan.repeat1 Parse.const_binding >> (Toplevel.theory o Sign.add_consts_cmd)); in end\ subsubsection \Syntax and translations\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\nonterminal\ "declare syntactic type constructors (grammar nonterminal symbols)" (Parse.and_list1 Parse.binding >> (Toplevel.theory o Sign.add_nonterminals_global)); val _ = Outer_Syntax.local_theory \<^command_keyword>\syntax\ "add raw syntax clauses" (Parse.syntax_mode -- Scan.repeat1 Parse.const_decl >> uncurry (Local_Theory.syntax_cmd true)); val _ = Outer_Syntax.local_theory \<^command_keyword>\no_syntax\ "delete raw syntax clauses" (Parse.syntax_mode -- Scan.repeat1 Parse.const_decl >> uncurry (Local_Theory.syntax_cmd false)); val trans_pat = Scan.optional (\<^keyword>\(\ |-- Parse.!!! (Parse.inner_syntax Parse.name --| \<^keyword>\)\)) "logic" -- Parse.inner_syntax Parse.string; fun trans_arrow toks = ((\<^keyword>\\\ || \<^keyword>\=>\) >> K Syntax.Parse_Rule || (\<^keyword>\\\ || \<^keyword>\<=\) >> K Syntax.Print_Rule || (\<^keyword>\\\ || \<^keyword>\==\) >> K Syntax.Parse_Print_Rule) toks; val trans_line = trans_pat -- Parse.!!! 
(trans_arrow -- trans_pat) >> (fn (left, (arr, right)) => arr (left, right)); val _ = Outer_Syntax.command \<^command_keyword>\translations\ "add syntax translation rules" (Scan.repeat1 trans_line >> (Toplevel.theory o Isar_Cmd.translations)); val _ = Outer_Syntax.command \<^command_keyword>\no_translations\ "delete syntax translation rules" (Scan.repeat1 trans_line >> (Toplevel.theory o Isar_Cmd.no_translations)); in end\ subsubsection \Translation functions\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\parse_ast_translation\ "install parse ast translation functions" (Parse.ML_source >> (Toplevel.theory o Isar_Cmd.parse_ast_translation)); val _ = Outer_Syntax.command \<^command_keyword>\parse_translation\ "install parse translation functions" (Parse.ML_source >> (Toplevel.theory o Isar_Cmd.parse_translation)); val _ = Outer_Syntax.command \<^command_keyword>\print_translation\ "install print translation functions" (Parse.ML_source >> (Toplevel.theory o Isar_Cmd.print_translation)); val _ = Outer_Syntax.command \<^command_keyword>\typed_print_translation\ "install typed print translation functions" (Parse.ML_source >> (Toplevel.theory o Isar_Cmd.typed_print_translation)); val _ = Outer_Syntax.command \<^command_keyword>\print_ast_translation\ "install print ast translation functions" (Parse.ML_source >> (Toplevel.theory o Isar_Cmd.print_ast_translation)); in end\ subsubsection \Specifications\ ML \ local val _ = Outer_Syntax.local_theory' \<^command_keyword>\definition\ "constant definition" (Scan.option Parse_Spec.constdecl -- (Parse_Spec.opt_thm_name ":" -- Parse.prop) -- Parse_Spec.if_assumes -- Parse.for_fixes >> (fn (((decl, spec), prems), params) => #2 oo Specification.definition_cmd decl params prems spec)); val _ = Outer_Syntax.local_theory' \<^command_keyword>\abbreviation\ "constant abbreviation" (Parse.syntax_mode -- Scan.option Parse_Spec.constdecl -- Parse.prop -- Parse.for_fixes >> (fn (((mode, decl), spec), params) => Specification.abbreviation_cmd mode decl params spec)); val axiomatization = Parse.and_list1 (Parse_Spec.thm_name ":" -- Parse.prop) -- Parse_Spec.if_assumes -- Parse.for_fixes >> (fn ((a, b), c) => (c, b, a)); val _ = Outer_Syntax.command \<^command_keyword>\axiomatization\ "axiomatic constant specification" (Scan.optional Parse.vars [] -- Scan.optional (Parse.where_ |-- Parse.!!! axiomatization) ([], [], []) >> (fn (a, (b, c, d)) => Toplevel.theory (#2 o Specification.axiomatization_cmd a b c d))); val _ = Outer_Syntax.local_theory \<^command_keyword>\alias\ "name-space alias for constant" (Parse.binding -- (Parse.!!! \<^keyword>\=\ |-- Parse.name_position) >> Specification.alias_cmd); val _ = Outer_Syntax.local_theory \<^command_keyword>\type_alias\ "name-space alias for type constructor" (Parse.binding -- (Parse.!!! 
      \<^keyword>‹=› |-- Parse.name_position) >> Specification.type_alias_cmd);

in end›


subsection ‹Notation›

ML ‹
local

val _ =
  Outer_Syntax.local_theory \<^command_keyword>‹type_notation›
    "add concrete syntax for type constructors"
    (Parse.syntax_mode -- Parse.and_list1 (Parse.type_const -- Parse.mixfix)
      >> (fn (mode, args) => Local_Theory.type_notation_cmd true mode args));

val _ =
  Outer_Syntax.local_theory \<^command_keyword>‹no_type_notation›
    "delete concrete syntax for type constructors"
    (Parse.syntax_mode -- Parse.and_list1 (Parse.type_const -- Parse.mixfix)
      >> (fn (mode, args) => Local_Theory.type_notation_cmd false mode args));

val _ =
  Outer_Syntax.local_theory \<^command_keyword>‹notation›
    "add concrete syntax for constants / fixed variables"
    (Parse.syntax_mode -- Parse.and_list1 (Parse.const -- Parse.mixfix)
      >> (fn (mode, args) => Local_Theory.notation_cmd true mode args));

val _ =
  Outer_Syntax.local_theory \<^command_keyword>‹no_notation›
    "delete concrete syntax for constants / fixed variables"
    (Parse.syntax_mode -- Parse.and_list1 (Parse.const -- Parse.mixfix)
      >> (fn (mode, args) => Local_Theory.notation_cmd false mode args));

in end›


subsubsection ‹Theorems›

ML ‹
local

val long_keyword =
  Parse_Spec.includes >> K "" ||
  Parse_Spec.long_statement_keyword;

val long_statement =
  Scan.optional (Parse_Spec.opt_thm_name ":" --| Scan.ahead long_keyword) Binding.empty_atts --
  Scan.optional Parse_Spec.includes [] -- Parse_Spec.long_statement
    >> (fn ((binding, includes), (elems, concl)) => (true, binding, includes, elems, concl));

val short_statement =
  Parse_Spec.statement -- Parse_Spec.if_statement -- Parse.for_fixes
    >> (fn ((shows, assumes), fixes) =>
      (false, Binding.empty_atts, [], [Element.Fixes fixes, Element.Assumes assumes],
        Element.Shows shows));

fun theorem spec schematic descr =
  Outer_Syntax.local_theory_to_proof' spec ("state " ^ descr)
    ((long_statement || short_statement) >> (fn (long, binding, includes, elems, concl) =>
      ((if schematic then Specification.schematic_theorem_cmd else Specification.theorem_cmd)
        long Thm.theoremK NONE (K I) binding includes elems concl)));

val _ = theorem \<^command_keyword>‹theorem› false "theorem";
val _ = theorem \<^command_keyword>‹lemma› false "lemma";
val _ = theorem \<^command_keyword>‹corollary› false "corollary";
val _ = theorem \<^command_keyword>‹proposition› false "proposition";
val _ = theorem \<^command_keyword>‹schematic_goal› true "schematic goal";

in end›

ML ‹
local

val _ =
  Outer_Syntax.local_theory' \<^command_keyword>‹lemmas› "define theorems"
    (Parse_Spec.name_facts -- Parse.for_fixes >>
      (fn (facts, fixes) => #2 oo Specification.theorems_cmd Thm.theoremK facts fixes));

val _ =
  Outer_Syntax.local_theory' \<^command_keyword>‹declare› "declare theorems"
    (Parse.and_list1 Parse.thms1 -- Parse.for_fixes >>
      (fn (facts, fixes) =>
        #2 oo Specification.theorems_cmd "" [(Binding.empty_atts, flat facts)] fixes));

val _ =
  Outer_Syntax.local_theory \<^command_keyword>‹named_theorems›
    "declare named collection of theorems"
-   (Parse.and_list1 (Parse.binding -- Scan.optional Parse.text "") >>
+   (Parse.and_list1 (Parse.binding -- Scan.optional Parse.embedded "") >>
      fold (fn (b, descr) => snd o Named_Theorems.declare b descr));

in end›


subsubsection ‹Hide names›

ML ‹
local

fun hide_names command_keyword what hide parse prep =
  Outer_Syntax.command command_keyword ("hide " ^ what ^ " from name space")
    ((Parse.opt_keyword "open" >> not) -- Scan.repeat1 parse >> (fn (fully, args) =>
      (Toplevel.theory (fn thy =>
        let val ctxt = Proof_Context.init_global thy in
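(* Usage sketch for the statement machinery above (all names hypothetical):

     named_theorems my_rules ‹collected rewrite rules›

     lemma my_lemma [my_rules]:
       fixes n :: nat
       assumes "0 < n"
       shows "n ~= 0"
       using assms by simp

   A statement counts as "long" when look-ahead finds a keyword such as
   "fixes"/"assumes"/"shows" (long_keyword); otherwise short_statement
   applies, as in: lemma "x = x" by (rule refl). *)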
fold (hide fully o prep ctxt) args thy end)))); val _ = hide_names \<^command_keyword>\hide_class\ "classes" Sign.hide_class Parse.class Proof_Context.read_class; val _ = hide_names \<^command_keyword>\hide_type\ "types" Sign.hide_type Parse.type_const ((#1 o dest_Type) oo Proof_Context.read_type_name {proper = true, strict = false}); val _ = hide_names \<^command_keyword>\hide_const\ "consts" Sign.hide_const Parse.const ((#1 o dest_Const) oo Proof_Context.read_const {proper = true, strict = false}); val _ = hide_names \<^command_keyword>\hide_fact\ "facts" Global_Theory.hide_fact Parse.name_position (Global_Theory.check_fact o Proof_Context.theory_of); in end\ subsection \Bundled declarations\ ML \ local val _ = Outer_Syntax.maybe_begin_local_theory \<^command_keyword>\bundle\ "define bundle of declarations" ((Parse.binding --| \<^keyword>\=\) -- Parse.thms1 -- Parse.for_fixes >> (uncurry Bundle.bundle_cmd)) (Parse.binding --| Parse.begin >> Bundle.init); val _ = Outer_Syntax.local_theory \<^command_keyword>\unbundle\ "activate declarations from bundle in local theory" (Scan.repeat1 Parse.name_position >> Bundle.unbundle_cmd); val _ = Outer_Syntax.command \<^command_keyword>\include\ "activate declarations from bundle in proof body" (Scan.repeat1 Parse.name_position >> (Toplevel.proof o Bundle.include_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\including\ "activate declarations from bundle in goal refinement" (Scan.repeat1 Parse.name_position >> (Toplevel.proof o Bundle.including_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\print_bundles\ "print bundles of declarations" (Parse.opt_bang >> (fn b => Toplevel.keep (Bundle.print_bundles b o Toplevel.context_of))); in end\ subsection \Local theory specifications\ subsubsection \Specification context\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\context\ "begin local theory context" (((Parse.name_position -- Scan.optional Parse_Spec.opening []) >> (fn (name, incls) => Toplevel.begin_main_target true (Target_Context.context_begin_named_cmd incls name)) || Scan.optional Parse_Spec.includes [] -- Scan.repeat Parse_Spec.context_element >> (fn (incls, elems) => Toplevel.begin_nested_target (Target_Context.context_begin_nested_cmd incls elems))) --| Parse.begin); val _ = Outer_Syntax.command \<^command_keyword>\end\ "end context" (Scan.succeed (Toplevel.exit o Toplevel.end_main_target o Toplevel.end_nested_target o Toplevel.end_proof (K Proof.end_notepad))); in end\ subsubsection \Locales and interpretation\ ML \ local val locale_context_elements = Scan.repeat1 Parse_Spec.context_element; val locale_val = ((Parse_Spec.locale_expression -- Scan.optional Parse_Spec.opening []) || Parse_Spec.opening >> pair ([], [])) -- Scan.optional (\<^keyword>\+\ |-- Parse.!!! locale_context_elements) [] || locale_context_elements >> pair (([], []), []); val _ = Outer_Syntax.command \<^command_keyword>\locale\ "define named specification context" (Parse.binding -- Scan.optional (\<^keyword>\=\ |-- Parse.!!! 
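(* Usage sketch for the bundle commands defined above (fact names
   hypothetical):

     bundle my_simps = my_rule1 [simp] my_rule2 [simp]

     unbundle my_simps      (* in a local theory *)
     include my_simps       (* inside a structured proof body *)

   "including" is the analogous form for unstructured goal refinement. *)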
locale_val) ((([], []), []), []) -- Parse.opt_begin >> (fn ((name, ((expr, includes), elems)), begin) => Toplevel.begin_main_target begin (Expression.add_locale_cmd name Binding.empty includes expr elems #> snd))); val _ = Outer_Syntax.command \<^command_keyword>\experiment\ "open private specification context" (Scan.repeat Parse_Spec.context_element --| Parse.begin >> (fn elems => Toplevel.begin_main_target true (Experiment.experiment_cmd elems #> snd))); val _ = Outer_Syntax.command \<^command_keyword>\interpret\ "prove interpretation of locale expression in proof context" (Parse.!!! Parse_Spec.locale_expression >> (fn expr => Toplevel.proof (Interpretation.interpret_cmd expr))); val interpretation_args_with_defs = Parse.!!! Parse_Spec.locale_expression -- (Scan.optional (\<^keyword>\defines\ |-- Parse.and_list1 (Parse_Spec.opt_thm_name ":" -- ((Parse.binding -- Parse.opt_mixfix') --| \<^keyword>\=\ -- Parse.term))) ([])); val _ = Outer_Syntax.local_theory_to_proof \<^command_keyword>\global_interpretation\ "prove interpretation of locale expression into global theory" (interpretation_args_with_defs >> (fn (expr, defs) => Interpretation.global_interpretation_cmd expr defs)); val _ = Outer_Syntax.command \<^command_keyword>\sublocale\ "prove sublocale relation between a locale and a locale expression" ((Parse.name_position --| (\<^keyword>\\\ || \<^keyword>\<\) -- interpretation_args_with_defs >> (fn (loc, (expr, defs)) => Toplevel.theory_to_proof (Interpretation.global_sublocale_cmd loc expr defs))) || interpretation_args_with_defs >> (fn (expr, defs) => Toplevel.local_theory_to_proof NONE NONE (Interpretation.sublocale_cmd expr defs))); val _ = Outer_Syntax.command \<^command_keyword>\interpretation\ "prove interpretation of locale expression in local theory or into global theory" (Parse.!!! Parse_Spec.locale_expression >> (fn expr => Toplevel.local_theory_to_proof NONE NONE (Interpretation.isar_interpretation_cmd expr))); in end\ subsubsection \Type classes\ ML \ local val class_context_elements = Scan.repeat1 Parse_Spec.context_element; val class_val = ((Parse_Spec.class_expression -- Scan.optional Parse_Spec.opening []) || Parse_Spec.opening >> pair []) -- Scan.optional (\<^keyword>\+\ |-- Parse.!!! class_context_elements) [] || class_context_elements >> pair ([], []); val _ = Outer_Syntax.command \<^command_keyword>\class\ "define type class" (Parse.binding -- Scan.optional (\<^keyword>\=\ |-- class_val) (([], []), []) -- Parse.opt_begin >> (fn ((name, ((supclasses, includes), elems)), begin) => Toplevel.begin_main_target begin (Class_Declaration.class_cmd name includes supclasses elems #> snd))); val _ = Outer_Syntax.local_theory_to_proof \<^command_keyword>\subclass\ "prove a subclass relation" (Parse.class >> Class_Declaration.subclass_cmd); val _ = Outer_Syntax.command \<^command_keyword>\instantiation\ "instantiate and prove type arity" (Parse.multi_arity --| Parse.begin >> (fn arities => Toplevel.begin_main_target true (Class.instantiation_cmd arities))); val _ = Outer_Syntax.command \<^command_keyword>\instance\ "prove type arity or subclass relation" ((Parse.class -- ((\<^keyword>\\\ || \<^keyword>\<\) |-- Parse.!!! 
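(* Usage sketch for locale and interpretation (all names hypothetical):

     locale preorder_like =
       fixes le :: "'a => 'a => bool"
       assumes refl: "le x x"

     interpretation nat_le: preorder_like "(<=) :: nat => nat => bool"
       by unfold_locales simp

   global_interpretation additionally accepts a "defines" clause, parsed by
   interpretation_args_with_defs above, to introduce defined constants. *)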
Parse.class) >> Class.classrel_cmd || Parse.multi_arity >> Class.instance_arity_cmd) >> Toplevel.theory_to_proof || Scan.succeed (Toplevel.local_theory_to_proof NONE NONE (Class.instantiation_instance I))); in end\ subsubsection \Arbitrary overloading\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\overloading\ "overloaded definitions" (Scan.repeat1 (Parse.name --| (\<^keyword>\==\ || \<^keyword>\\\) -- Parse.term -- Scan.optional (\<^keyword>\(\ |-- (\<^keyword>\unchecked\ >> K false) --| \<^keyword>\)\) true >> Scan.triple1) --| Parse.begin >> (fn operations => Toplevel.begin_main_target true (Overloading.overloading_cmd operations))); in end\ subsection \Proof commands\ ML \ local val _ = Outer_Syntax.local_theory_to_proof \<^command_keyword>\notepad\ "begin proof context" (Parse.begin >> K Proof.begin_notepad); in end\ subsubsection \Statements\ ML \ local val structured_statement = Parse_Spec.statement -- Parse_Spec.cond_statement -- Parse.for_fixes >> (fn ((shows, (strict, assumes)), fixes) => (strict, fixes, assumes, shows)); val _ = Outer_Syntax.command \<^command_keyword>\have\ "state local goal" (structured_statement >> (fn (a, b, c, d) => Toplevel.proof' (fn int => Proof.have_cmd a NONE (K I) b c d int #> #2))); val _ = Outer_Syntax.command \<^command_keyword>\show\ "state local goal, to refine pending subgoals" (structured_statement >> (fn (a, b, c, d) => Toplevel.proof' (fn int => Proof.show_cmd a NONE (K I) b c d int #> #2))); val _ = Outer_Syntax.command \<^command_keyword>\hence\ "old-style alias of \"then have\"" (structured_statement >> (fn (a, b, c, d) => Toplevel.proof' (fn int => Proof.chain #> Proof.have_cmd a NONE (K I) b c d int #> #2))); val _ = Outer_Syntax.command \<^command_keyword>\thus\ "old-style alias of \"then show\"" (structured_statement >> (fn (a, b, c, d) => Toplevel.proof' (fn int => Proof.chain #> Proof.show_cmd a NONE (K I) b c d int #> #2))); in end\ subsubsection \Local facts\ ML \ local val facts = Parse.and_list1 Parse.thms1; val _ = Outer_Syntax.command \<^command_keyword>\then\ "forward chaining" (Scan.succeed (Toplevel.proof Proof.chain)); val _ = Outer_Syntax.command \<^command_keyword>\from\ "forward chaining from given facts" (facts >> (Toplevel.proof o Proof.from_thmss_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\with\ "forward chaining from given and current facts" (facts >> (Toplevel.proof o Proof.with_thmss_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\note\ "define facts" (Parse_Spec.name_facts >> (Toplevel.proof o Proof.note_thmss_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\supply\ "define facts during goal refinement (unstructured)" (Parse_Spec.name_facts >> (Toplevel.proof o Proof.supply_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\using\ "augment goal facts" (facts >> (Toplevel.proof o Proof.using_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\unfolding\ "unfold definitions in goal and facts" (facts >> (Toplevel.proof o Proof.unfolding_cmd)); in end\ subsubsection \Proof context\ ML \ local val structured_statement = Parse_Spec.statement -- Parse_Spec.if_statement' -- Parse.for_fixes >> (fn ((shows, assumes), fixes) => (fixes, assumes, shows)); val _ = Outer_Syntax.command \<^command_keyword>\fix\ "fix local variables (Skolem constants)" (Parse.vars >> (Toplevel.proof o Proof.fix_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\assume\ "assume propositions" (structured_statement >> (fn (a, b, c) => Toplevel.proof (Proof.assume_cmd a b c))); val _ = 
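(* Usage sketch for the structured goal statements set up above:

     have "n + 0 = n" for n :: nat
       by simp

   structured_statement also admits conditional forms such as
   "have B if A for x", where the "if" part becomes goal premises and the
   "for" part fixes local parameters. *)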
Outer_Syntax.command \<^command_keyword>\presume\ "assume propositions, to be established later" (structured_statement >> (fn (a, b, c) => Toplevel.proof (Proof.presume_cmd a b c))); val _ = Outer_Syntax.command \<^command_keyword>\define\ "local definition (non-polymorphic)" ((Parse.vars --| Parse.where_) -- Parse_Spec.statement -- Parse.for_fixes >> (fn ((a, b), c) => Toplevel.proof (Proof.define_cmd a c b))); val _ = Outer_Syntax.command \<^command_keyword>\consider\ "state cases rule" (Parse_Spec.obtains >> (Toplevel.proof' o Obtain.consider_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\obtain\ "generalized elimination" (Parse.parbinding -- Scan.optional (Parse.vars --| Parse.where_) [] -- structured_statement >> (fn ((a, b), (c, d, e)) => Toplevel.proof' (Obtain.obtain_cmd a b c d e))); val _ = Outer_Syntax.command \<^command_keyword>\let\ "bind text variables" (Parse.and_list1 (Parse.and_list1 Parse.term -- (\<^keyword>\=\ |-- Parse.term)) >> (Toplevel.proof o Proof.let_bind_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\write\ "add concrete syntax for constants / fixed variables" (Parse.syntax_mode -- Parse.and_list1 (Parse.const -- Parse.mixfix) >> (fn (mode, args) => Toplevel.proof (Proof.write_cmd mode args))); val _ = Outer_Syntax.command \<^command_keyword>\case\ "invoke local context" (Parse_Spec.opt_thm_name ":" -- (\<^keyword>\(\ |-- Parse.!!! (Parse.name_position -- Scan.repeat (Parse.maybe Parse.binding) --| \<^keyword>\)\) || Parse.name_position >> rpair []) >> (Toplevel.proof o Proof.case_cmd)); in end\ subsubsection \Proof structure\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\{\ "begin explicit proof block" (Scan.succeed (Toplevel.proof Proof.begin_block)); val _ = Outer_Syntax.command \<^command_keyword>\}\ "end explicit proof block" (Scan.succeed (Toplevel.proof Proof.end_block)); val _ = Outer_Syntax.command \<^command_keyword>\next\ "enter next proof block" (Scan.succeed (Toplevel.proof Proof.next_block)); in end\ subsubsection \End proof\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\qed\ "conclude proof" (Scan.option Method.parse >> (fn m => (Option.map Method.report m; Isar_Cmd.qed m))); val _ = Outer_Syntax.command \<^command_keyword>\by\ "terminal backward proof" (Method.parse -- Scan.option Method.parse >> (fn (m1, m2) => (Method.report m1; Option.map Method.report m2; Isar_Cmd.terminal_proof (m1, m2)))); val _ = Outer_Syntax.command \<^command_keyword>\..\ "default proof" (Scan.succeed Isar_Cmd.default_proof); val _ = Outer_Syntax.command \<^command_keyword>\.\ "immediate proof" (Scan.succeed Isar_Cmd.immediate_proof); val _ = Outer_Syntax.command \<^command_keyword>\done\ "done proof" (Scan.succeed Isar_Cmd.done_proof); val _ = Outer_Syntax.command \<^command_keyword>\sorry\ "skip proof (quick-and-dirty mode only!)" (Scan.succeed Isar_Cmd.skip_proof); val _ = Outer_Syntax.command \<^command_keyword>\\\ "dummy proof (quick-and-dirty mode only!)" (Scan.succeed Isar_Cmd.skip_proof); val _ = Outer_Syntax.command \<^command_keyword>\oops\ "forget proof" (Scan.succeed Toplevel.forget_proof); in end\ subsubsection \Proof steps\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\defer\ "shuffle internal proof state" (Scan.optional Parse.nat 1 >> (Toplevel.proof o Proof.defer)); val _ = Outer_Syntax.command \<^command_keyword>\prefer\ "shuffle internal proof state" (Parse.nat >> (Toplevel.proof o Proof.prefer)); val _ = Outer_Syntax.command \<^command_keyword>\apply\ "initial goal refinement step 
(unstructured)" (Method.parse >> (fn m => (Method.report m; Toplevel.proofs (Proof.apply m)))); val _ = Outer_Syntax.command \<^command_keyword>\apply_end\ "terminal goal refinement step (unstructured)" (Method.parse >> (fn m => (Method.report m; Toplevel.proofs (Proof.apply_end m)))); val _ = Outer_Syntax.command \<^command_keyword>\proof\ "backward proof step" (Scan.option Method.parse >> (fn m => (Option.map Method.report m; Toplevel.proof (fn state => let val state' = state |> Proof.proof m |> Seq.the_result ""; val _ = Output.information (Proof_Context.print_cases_proof (Proof.context_of state) (Proof.context_of state')); in state' end)))) in end\ subsubsection \Subgoal focus\ ML \ local val opt_fact_binding = Scan.optional (Parse.binding -- Parse.opt_attribs || Parse.attribs >> pair Binding.empty) Binding.empty_atts; val for_params = Scan.optional (\<^keyword>\for\ |-- Parse.!!! ((Scan.option Parse.dots >> is_some) -- (Scan.repeat1 (Parse.maybe_position Parse.name_position)))) (false, []); val _ = Outer_Syntax.command \<^command_keyword>\subgoal\ "focus on first subgoal within backward refinement" (opt_fact_binding -- (Scan.option (\<^keyword>\premises\ |-- Parse.!!! opt_fact_binding)) -- for_params >> (fn ((a, b), c) => Toplevel.proofs (Seq.make_results o Seq.single o #2 o Subgoal.subgoal_cmd a b c))); in end\ subsubsection \Calculation\ ML \ local val calculation_args = Scan.option (\<^keyword>\(\ |-- Parse.!!! ((Parse.thms1 --| \<^keyword>\)\))); val _ = Outer_Syntax.command \<^command_keyword>\also\ "combine calculation and current facts" (calculation_args >> (Toplevel.proofs' o Calculation.also_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\finally\ "combine calculation and current facts, exhibit result" (calculation_args >> (Toplevel.proofs' o Calculation.finally_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\moreover\ "augment calculation by current facts" (Scan.succeed (Toplevel.proof' Calculation.moreover)); val _ = Outer_Syntax.command \<^command_keyword>\ultimately\ "augment calculation by current facts, exhibit result" (Scan.succeed (Toplevel.proof' Calculation.ultimately)); val _ = Outer_Syntax.command \<^command_keyword>\print_trans_rules\ "print transitivity rules" (Scan.succeed (Toplevel.keep (Calculation.print_rules o Toplevel.context_of))); in end\ subsubsection \Proof navigation\ ML \ local fun report_back () = Output.report [Markup.markup (Markup.bad ()) "Explicit backtracking"]; val _ = Outer_Syntax.command \<^command_keyword>\back\ "explicit backtracking of proof command" (Scan.succeed (Toplevel.actual_proof (fn prf => (report_back (); Proof_Node.back prf)) o Toplevel.skip_proof report_back)); in end\ subsection \Diagnostic commands (for interactive mode only)\ ML \ local val opt_modes = Scan.optional (\<^keyword>\(\ |-- Parse.!!! 
(Scan.repeat1 Parse.name --| \<^keyword>\)\)) []; val _ = Outer_Syntax.command \<^command_keyword>\help\ "retrieve outer syntax commands according to name patterns" (Scan.repeat Parse.name >> (fn pats => Toplevel.keep (fn st => Outer_Syntax.help (Toplevel.theory_of st) pats))); val _ = Outer_Syntax.command \<^command_keyword>\print_commands\ "print outer syntax commands" (Scan.succeed (Toplevel.keep (Outer_Syntax.print_commands o Toplevel.theory_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_options\ "print configuration options" (Parse.opt_bang >> (fn b => Toplevel.keep (Attrib.print_options b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_context\ "print context of local theory target" (Scan.succeed (Toplevel.keep (Pretty.writeln_chunks o Toplevel.pretty_context))); val _ = Outer_Syntax.command \<^command_keyword>\print_theory\ "print logical theory contents" (Parse.opt_bang >> (fn b => Toplevel.keep (Pretty.writeln o Proof_Display.pretty_theory b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_definitions\ "print dependencies of definitional theory content" (Parse.opt_bang >> (fn b => Toplevel.keep (Pretty.writeln o Proof_Display.pretty_definitions b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_syntax\ "print inner syntax of context" (Scan.succeed (Toplevel.keep (Proof_Context.print_syntax o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_defn_rules\ "print definitional rewrite rules of context" (Scan.succeed (Toplevel.keep (Local_Defs.print_rules o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_abbrevs\ "print constant abbreviations of context" (Parse.opt_bang >> (fn b => Toplevel.keep (Proof_Context.print_abbrevs b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_theorems\ "print theorems of local theory or proof context" (Parse.opt_bang >> (fn b => Toplevel.keep (Pretty.writeln o Pretty.chunks o Isar_Cmd.pretty_theorems b))); val _ = Outer_Syntax.command \<^command_keyword>\print_locales\ "print locales of this theory" (Parse.opt_bang >> (fn verbose => Toplevel.keep (fn state => let val thy = Toplevel.theory_of state in Pretty.writeln (Locale.pretty_locales thy verbose) end))); val _ = Outer_Syntax.command \<^command_keyword>\print_classes\ "print classes of this theory" (Scan.succeed (Toplevel.keep (Class.print_classes o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_locale\ "print locale of this theory" (Parse.opt_bang -- Parse.name_position >> (fn (show_facts, raw_name) => Toplevel.keep (fn state => let val thy = Toplevel.theory_of state; val name = Locale.check thy raw_name; in Pretty.writeln (Locale.pretty_locale thy show_facts name) end))); val _ = Outer_Syntax.command \<^command_keyword>\print_interps\ "print interpretations of locale for this theory or proof context" (Parse.name_position >> (fn raw_name => Toplevel.keep (fn state => let val ctxt = Toplevel.context_of state; val thy = Toplevel.theory_of state; val name = Locale.check thy raw_name; in Pretty.writeln (Locale.pretty_registrations ctxt name) end))); val _ = Outer_Syntax.command \<^command_keyword>\print_attributes\ "print attributes of this theory" (Parse.opt_bang >> (fn b => Toplevel.keep (Attrib.print_attributes b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_simpset\ "print context of Simplifier" (Parse.opt_bang >> (fn b => 
Toplevel.keep (Pretty.writeln o Simplifier.pretty_simpset b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_rules\ "print intro/elim rules" (Scan.succeed (Toplevel.keep (Context_Rules.print_rules o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_methods\ "print methods of this theory" (Parse.opt_bang >> (fn b => Toplevel.keep (Method.print_methods b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_antiquotations\ "print document antiquotations" (Parse.opt_bang >> (fn b => Toplevel.keep (Document_Antiquotation.print_antiquotations b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_ML_antiquotations\ "print ML antiquotations" (Parse.opt_bang >> (fn b => Toplevel.keep (ML_Context.print_antiquotations b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\locale_deps\ "visualize locale dependencies" (Scan.succeed (Toplevel.keep (Toplevel.theory_of #> (fn thy => Locale.pretty_locale_deps thy |> map (fn {name, parents, body} => ((name, Graph_Display.content_node (Locale.extern thy name) [body]), parents)) |> Graph_Display.display_graph_old)))); val _ = Outer_Syntax.command \<^command_keyword>\print_term_bindings\ "print term bindings of proof context" (Scan.succeed (Toplevel.keep (Pretty.writeln_chunks o Proof_Context.pretty_term_bindings o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_facts\ "print facts of proof context" (Parse.opt_bang >> (fn b => Toplevel.keep (Proof_Context.print_local_facts b o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_cases\ "print cases of proof context" (Scan.succeed (Toplevel.keep (Pretty.writeln_chunks o Proof_Context.pretty_cases o Toplevel.context_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_statement\ "print theorems as long statements" (opt_modes -- Parse.thms1 >> Isar_Cmd.print_stmts); val _ = Outer_Syntax.command \<^command_keyword>\thm\ "print theorems" (opt_modes -- Parse.thms1 >> Isar_Cmd.print_thms); val _ = Outer_Syntax.command \<^command_keyword>\prf\ "print proof terms of theorems" (opt_modes -- Scan.option Parse.thms1 >> Isar_Cmd.print_prfs false); val _ = Outer_Syntax.command \<^command_keyword>\full_prf\ "print full proof terms of theorems" (opt_modes -- Scan.option Parse.thms1 >> Isar_Cmd.print_prfs true); val _ = Outer_Syntax.command \<^command_keyword>\prop\ "read and print proposition" (opt_modes -- Parse.term >> Isar_Cmd.print_prop); val _ = Outer_Syntax.command \<^command_keyword>\term\ "read and print term" (opt_modes -- Parse.term >> Isar_Cmd.print_term); val _ = Outer_Syntax.command \<^command_keyword>\typ\ "read and print type" (opt_modes -- (Parse.typ -- Scan.option (\<^keyword>\::\ |-- Parse.!!! 
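(* Usage sketch for the diagnostic commands above, as typed interactively:

     thm refl
     term "f x"
     typ "'a => 'b"
     prop "x = x"

   The opt_modes prefix, e.g. term (latex) "f x", temporarily switches the
   pretty-printing mode. *)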
Parse.sort)) >> Isar_Cmd.print_type); val _ = Outer_Syntax.command \<^command_keyword>\print_codesetup\ "print code generator setup" (Scan.succeed (Toplevel.keep (Code.print_codesetup o Toplevel.theory_of))); val _ = Outer_Syntax.command \<^command_keyword>\print_state\ "print current proof state (if present)" (opt_modes >> (fn modes => Toplevel.keep (Print_Mode.with_modes modes (Output.state o Toplevel.string_of_state)))); val _ = Outer_Syntax.command \<^command_keyword>\welcome\ "print welcome message" (Scan.succeed (Toplevel.keep (fn _ => writeln (Session.welcome ())))); in end\ subsection \Dependencies\ ML \ local val theory_bounds = Parse.theory_name >> single || (\<^keyword>\(\ |-- Parse.enum "|" Parse.theory_name --| \<^keyword>\)\); val _ = Outer_Syntax.command \<^command_keyword>\thy_deps\ "visualize theory dependencies" (Scan.option theory_bounds -- Scan.option theory_bounds >> (fn args => Toplevel.keep (fn st => Thy_Deps.thy_deps_cmd (Toplevel.context_of st) args))); val class_bounds = Parse.sort >> single || (\<^keyword>\(\ |-- Parse.enum "|" Parse.sort --| \<^keyword>\)\); val _ = Outer_Syntax.command \<^command_keyword>\class_deps\ "visualize class dependencies" (Scan.option class_bounds -- Scan.option class_bounds >> (fn args => Toplevel.keep (fn st => Class_Deps.class_deps_cmd (Toplevel.context_of st) args))); val _ = Outer_Syntax.command \<^command_keyword>\thm_deps\ "print theorem dependencies (immediate non-transitive)" (Parse.thms1 >> (fn args => Toplevel.keep (fn st => let val thy = Toplevel.theory_of st; val ctxt = Toplevel.context_of st; in Pretty.writeln (Thm_Deps.pretty_thm_deps thy (Attrib.eval_thms ctxt args)) end))); val _ = Outer_Syntax.command \<^command_keyword>\thm_oracles\ "print all oracles used in theorems (full graph of transitive dependencies)" (Parse.thms1 >> (fn args => Toplevel.keep (fn st => let val ctxt = Toplevel.context_of st; val thms = Attrib.eval_thms ctxt args; in Pretty.writeln (Thm_Deps.pretty_thm_oracles ctxt thms) end))); val thy_names = Scan.repeat1 (Scan.unless Parse.minus Parse.theory_name); val _ = Outer_Syntax.command \<^command_keyword>\unused_thms\ "find unused theorems" (Scan.option ((thy_names --| Parse.minus) -- Scan.option thy_names) >> (fn opt_range => Toplevel.keep (fn st => let val thy = Toplevel.theory_of st; val ctxt = Toplevel.context_of st; fun pretty_thm (a, th) = Proof_Context.pretty_fact ctxt (a, [th]); val check = Theory.check {long = false} ctxt; in Thm_Deps.unused_thms_cmd (case opt_range of NONE => (Theory.parents_of thy, [thy]) | SOME (xs, NONE) => (map check xs, [thy]) | SOME (xs, SOME ys) => (map check xs, map check ys)) |> map pretty_thm |> Pretty.writeln_chunks end))); in end\ subsubsection \Find consts and theorems\ ML \ local val _ = Outer_Syntax.command \<^command_keyword>\find_consts\ "find constants by name / type patterns" (Find_Consts.query_parser >> (fn spec => Toplevel.keep (fn st => Pretty.writeln (Find_Consts.pretty_consts (Toplevel.context_of st) spec)))); val options = Scan.optional (Parse.$$$ "(" |-- Parse.!!! 
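(* Usage sketch for the find_theorems option syntax parsed below:

     find_theorems (50) "_ = _"
     find_theorems (with_dups) name: conj

   A numeral bounds the number of results printed; "with_dups" keeps
   duplicate theorems (rem_dups becomes false). Both parts are optional. *)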
      (Scan.option Parse.nat --
        Scan.optional (Parse.reserved "with_dups" >> K false) true --| Parse.$$$ ")"))
    (NONE, true);

val _ =
  Outer_Syntax.command \<^command_keyword>‹find_theorems›
    "find theorems meeting specified criteria"
    (options -- Find_Theorems.query_parser >> (fn ((opt_lim, rem_dups), spec) =>
      Toplevel.keep (fn st =>
        Pretty.writeln
          (Find_Theorems.pretty_theorems (Find_Theorems.proof_state st) opt_lim rem_dups spec))));

in end›


subsection ‹Code generation›

ML ‹
local

val _ =
  Outer_Syntax.command \<^command_keyword>‹code_datatype›
    "define set of code datatype constructors"
    (Scan.repeat1 Parse.term >> (Toplevel.theory o Code.declare_datatype_cmd));

in end›


subsection ‹Extraction of programs from proofs›

ML ‹
local

val parse_vars = Scan.optional (Parse.$$$ "(" |-- Parse.list1 Parse.name --| Parse.$$$ ")") [];

val _ =
  Outer_Syntax.command \<^command_keyword>‹realizers›
    "specify realizers for primitive axioms / theorems, together with correctness proof"
    (Scan.repeat1 (Parse.name -- parse_vars --| Parse.$$$ ":" -- Parse.string -- Parse.string) >>
     (fn xs => Toplevel.theory (fn thy => Extraction.add_realizers
       (map (fn (((a, vs), s1), s2) => (Global_Theory.get_thm thy a, (vs, s1, s2))) xs) thy)));

val _ =
  Outer_Syntax.command \<^command_keyword>‹realizability›
    "add equations characterizing realizability"
    (Scan.repeat1 Parse.string >> (Toplevel.theory o Extraction.add_realizes_eqns));

val _ =
  Outer_Syntax.command \<^command_keyword>‹extract_type›
    "add equations characterizing type of extracted program"
    (Scan.repeat1 Parse.string >> (Toplevel.theory o Extraction.add_typeof_eqns));

val _ =
  Outer_Syntax.command \<^command_keyword>‹extract› "extract terms from proofs"
    (Scan.repeat1 (Parse.name -- parse_vars) >> (fn xs =>
      Toplevel.theory (fn thy =>
        Extraction.extract (map (apfst (Global_Theory.get_thm thy)) xs) thy)));

in end›


section ‹Auxiliary lemmas›

subsection ‹Meta-level connectives in assumptions›

lemma meta_mp:
  assumes "PROP P ⟹ PROP Q" and "PROP P"
  shows "PROP Q"
    by (rule ‹PROP P ⟹ PROP Q› [OF ‹PROP P›])

lemmas meta_impE = meta_mp [elim_format]

lemma meta_spec:
  assumes "⋀x. PROP P x"
  shows "PROP P x"
    by (rule ‹⋀x. PROP P x›)

lemmas meta_allE = meta_spec [elim_format]

lemma swap_params:
  "(⋀x y. PROP P x y) ≡ (⋀y x. PROP P x y)" ..

lemma equal_allI:
  ‹(⋀x. PROP P x) ≡ (⋀x. PROP Q x)› if ‹⋀x. PROP P x ≡ PROP Q x›
  by (simp only: that)


subsection ‹Meta-level conjunction›

lemma all_conjunction:
  "(⋀x. PROP A x &&& PROP B x) ≡ ((⋀x. PROP A x) &&& (⋀x. PROP B x))"
proof
  assume conj: "⋀x. PROP A x &&& PROP B x"
  show "(⋀x. PROP A x) &&& (⋀x. PROP B x)"
  proof -
    fix x
    from conj show "PROP A x" by (rule conjunctionD1)
    from conj show "PROP B x" by (rule conjunctionD2)
  qed
next
  assume conj: "(⋀x. PROP A x) &&& (⋀x. PROP B x)"
  fix x
  show "PROP A x &&& PROP B x"
  proof -
    show "PROP A x" by (rule conj [THEN conjunctionD1, rule_format])
    show "PROP B x" by (rule conj [THEN conjunctionD2, rule_format])
  qed
qed

lemma imp_conjunction:
  "(PROP A ⟹ PROP B &&& PROP C) ≡ ((PROP A ⟹ PROP B) &&& (PROP A ⟹ PROP C))"
proof
  assume conj: "PROP A ⟹ PROP B &&& PROP C"
  show "(PROP A ⟹ PROP B) &&& (PROP A ⟹ PROP C)"
  proof -
    assume "PROP A"
    from conj [OF ‹PROP A›] show "PROP B" by (rule conjunctionD1)
    from conj [OF ‹PROP A›] show "PROP C" by (rule conjunctionD2)
  qed
next
  assume conj: "(PROP A ⟹ PROP B) &&& (PROP A ⟹ PROP C)"
  assume "PROP A"
  show "PROP B &&& PROP C"
  proof -
    from ‹PROP A› show "PROP B" by (rule conj [THEN conjunctionD1])
    from ‹PROP A› show "PROP C" by (rule conj [THEN conjunctionD2])
  qed
qed

lemma conjunction_imp:
  "(PROP A &&& PROP B ⟹ PROP C) ≡ (PROP A ⟹ PROP B ⟹ PROP C)"
proof
  assume r: "PROP A &&& PROP B ⟹ PROP C"
  assume ab: "PROP A" "PROP B"
  show "PROP C"
  proof (rule r)
    from ab show "PROP A &&& PROP B" .
  qed
next
  assume r: "PROP A ⟹ PROP B ⟹ PROP C"
  assume conj: "PROP A &&& PROP B"
  show "PROP C"
  proof (rule r)
    from conj show "PROP A" by (rule conjunctionD1)
    from conj show "PROP B" by (rule conjunctionD2)
  qed
qed

declare [[ML_write_global = false]]

end
diff --git a/src/Pure/Thy/bibtex.ML b/src/Pure/Thy/bibtex.ML
--- a/src/Pure/Thy/bibtex.ML
+++ b/src/Pure/Thy/bibtex.ML
@@ -1,66 +1,65 @@
(*  Title:      Pure/Thy/bibtex.ML
    Author:     Makarius

BibTeX support.
*)

signature BIBTEX =
sig
  val check_database:
    Position.T -> string -> (string * Position.T) list * (string * Position.T) list
  val check_database_output: Position.T -> string -> unit
  val cite_macro: string Config.T
end;

structure Bibtex: BIBTEX =
struct

(* check database *)

type message = string * Position.T;

fun check_database pos0 database =
  \<^scala>‹bibtex_check_database› database
  |> YXML.parse_body
  |> let open XML.Decode in pair (list (pair string properties)) (list (pair string properties)) end
  |> (apply2 o map o apsnd) (fn pos => Position.of_properties (pos @ Position.get_props pos0));

fun check_database_output pos0 database =
  let val (errors, warnings) = check_database pos0 database in
    errors |> List.app (fn (msg, pos) =>
      Output.error_message ("Bibtex error" ^ Position.here pos ^ ":\n  " ^ msg));
    warnings |> List.app (fn (msg, pos) =>
      warning ("Bibtex warning" ^ Position.here pos ^ ":\n  " ^ msg))
  end;


(* document antiquotations *)

val cite_macro = Attrib.setup_config_string \<^binding>‹cite_macro› (K "cite");

val _ =
  Theory.setup
   (Document_Antiquotation.setup_option \<^binding>‹cite_macro› (Config.put cite_macro) #>
    Document_Output.antiquotation_raw \<^binding>‹cite›
-     (Scan.lift
-       (Scan.option (Parse.verbatim || Parse.cartouche) -- Parse.and_list1 Args.name_position))
+     (Scan.lift (Scan.option Parse.cartouche -- Parse.and_list1 Args.name_position))
      (fn ctxt => fn (opt, citations) =>
        let
          val _ =
            Context_Position.reports ctxt
              (map (fn (name, pos) => (pos, Markup.citation name)) citations);
          val thy_name = Context.theory_long_name (Proof_Context.theory_of ctxt);
          val bibtex_entries = Resources.theory_bibtex_entries thy_name;
          val _ =
            if null bibtex_entries andalso thy_name <> Context.PureN then ()
            else
              citations |> List.app (fn (name, pos) =>
                if member (op =) bibtex_entries name then ()
                else error ("Unknown Bibtex entry " ^ quote name ^ Position.here pos));
          val opt_arg = (case opt of NONE => "" | SOME s => "[" ^ s ^ "]");
          val arg = "{" ^ space_implode "," (map #1 citations) ^ "}";
        in Latex.string ("\\" ^ Config.get ctxt cite_macro ^ opt_arg ^ arg) end));

end;
diff --git a/src/Pure/Thy/document_antiquotations.ML b/src/Pure/Thy/document_antiquotations.ML --- a/src/Pure/Thy/document_antiquotations.ML +++ b/src/Pure/Thy/document_antiquotations.ML @@ -1,447 +1,447 @@ (* Title: Pure/Thy/document_antiquotations.ML Author: Makarius Miscellaneous document antiquotations. *) structure Document_Antiquotations: sig end = struct (* basic entities *) local type style = term -> term; fun pretty_term_style ctxt (style: style, t) = Document_Output.pretty_term ctxt (style t); fun pretty_thms_style ctxt (style: style, ths) = map (fn th => Document_Output.pretty_term ctxt (style (Thm.full_prop_of th))) ths; fun pretty_term_typ ctxt (style: style, t) = let val t' = style t in Document_Output.pretty_term ctxt (Type.constraint (Term.fastype_of t') t') end; fun pretty_term_typeof ctxt (style: style, t) = Syntax.pretty_typ ctxt (Term.fastype_of (style t)); fun pretty_const ctxt c = let val t = Const (c, Consts.type_scheme (Proof_Context.consts_of ctxt) c) handle TYPE (msg, _, _) => error msg; val (t', _) = yield_singleton (Variable.import_terms true) t ctxt; in Document_Output.pretty_term ctxt t' end; fun pretty_abbrev ctxt s = let val t = Syntax.read_term (Proof_Context.set_mode Proof_Context.mode_abbrev ctxt) s; fun err () = error ("Abbreviated constant expected: " ^ Syntax.string_of_term ctxt t); val (head, args) = Term.strip_comb t; val (c, T) = Term.dest_Const head handle TERM _ => err (); val (U, u) = Consts.the_abbreviation (Proof_Context.consts_of ctxt) c handle TYPE _ => err (); val t' = Term.betapplys (Envir.expand_atom T (U, u), args); val eq = Logic.mk_equals (t, t'); val ctxt' = Proof_Context.augment eq ctxt; in Proof_Context.pretty_term_abbrev ctxt' eq end; fun pretty_locale ctxt (name, pos) = let val thy = Proof_Context.theory_of ctxt in Pretty.str (Locale.extern thy (Locale.check thy (name, pos))) end; fun pretty_bundle ctxt (name, pos) = Pretty.str (Bundle.extern ctxt (Bundle.check ctxt (name, pos))); fun pretty_class ctxt s = Pretty.str (Proof_Context.extern_class ctxt (Proof_Context.read_class ctxt s)); fun pretty_type ctxt s = let val Type (name, _) = Proof_Context.read_type_name {proper = true, strict = false} ctxt s in Pretty.str (Proof_Context.extern_type ctxt name) end; fun pretty_prf full ctxt = Proof_Syntax.pretty_standard_proof_of ctxt full; fun pretty_theory ctxt (name, pos) = (Theory.check {long = true} ctxt (name, pos); Pretty.str name); val basic_entity = Document_Output.antiquotation_pretty_source_embedded; fun basic_entities name scan pretty = Document_Antiquotation.setup name scan (fn {context = ctxt, source = src, argument = xs} => Document_Output.pretty_items_source ctxt {embedded = false} src (map (pretty ctxt) xs)); val _ = Theory.setup (basic_entity \<^binding>\prop\ (Term_Style.parse -- Args.prop) pretty_term_style #> basic_entity \<^binding>\term\ (Term_Style.parse -- Args.term) pretty_term_style #> basic_entity \<^binding>\term_type\ (Term_Style.parse -- Args.term) pretty_term_typ #> basic_entity \<^binding>\typeof\ (Term_Style.parse -- Args.term) pretty_term_typeof #> basic_entity \<^binding>\const\ (Args.const {proper = true, strict = false}) pretty_const #> basic_entity \<^binding>\abbrev\ (Scan.lift Parse.embedded_inner_syntax) pretty_abbrev #> basic_entity \<^binding>\typ\ Args.typ_abbrev Syntax.pretty_typ #> basic_entity \<^binding>\locale\ (Scan.lift Parse.embedded_position) pretty_locale #> basic_entity \<^binding>\bundle\ (Scan.lift Parse.embedded_position) pretty_bundle #> basic_entity \<^binding>\class\ (Scan.lift 
Parse.embedded_inner_syntax) pretty_class #> basic_entity \<^binding>\type\ (Scan.lift Parse.embedded_inner_syntax) pretty_type #> basic_entity \<^binding>\theory\ (Scan.lift Parse.embedded_position) pretty_theory #> basic_entities \<^binding>\prf\ Attrib.thms (pretty_prf false) #> basic_entities \<^binding>\full_prf\ Attrib.thms (pretty_prf true) #> Document_Antiquotation.setup \<^binding>\thm\ (Term_Style.parse -- Attrib.thms) (fn {context = ctxt, source = src, argument = arg} => Document_Output.pretty_items_source ctxt {embedded = false} src (pretty_thms_style ctxt arg))); in end; (* Markdown errors *) local fun markdown_error binding = Document_Antiquotation.setup binding (Scan.succeed ()) (fn {source = src, ...} => error ("Bad Markdown structure: illegal " ^ quote (Binding.name_of binding) ^ Position.here (Position.no_range_position (#1 (Token.range_of src))))) val _ = Theory.setup (markdown_error \<^binding>\item\ #> markdown_error \<^binding>\enum\ #> markdown_error \<^binding>\descr\); in end; (* control spacing *) val _ = Theory.setup (Document_Output.antiquotation_raw \<^binding>\noindent\ (Scan.succeed ()) (fn _ => fn () => Latex.string "\\noindent") #> Document_Output.antiquotation_raw \<^binding>\smallskip\ (Scan.succeed ()) (fn _ => fn () => Latex.string "\\smallskip") #> Document_Output.antiquotation_raw \<^binding>\medskip\ (Scan.succeed ()) (fn _ => fn () => Latex.string "\\medskip") #> Document_Output.antiquotation_raw \<^binding>\bigskip\ (Scan.succeed ()) (fn _ => fn () => Latex.string "\\bigskip")); (* nested document text *) local fun nested_antiquotation name macro = Document_Output.antiquotation_raw_embedded name (Scan.lift Args.cartouche_input) (fn ctxt => fn txt => (Context_Position.reports ctxt (Document_Output.document_reports txt); Latex.macro macro (Document_Output.output_document ctxt {markdown = false} txt))); val _ = Theory.setup (nested_antiquotation \<^binding>\footnote\ "footnote" #> nested_antiquotation \<^binding>\emph\ "emph" #> nested_antiquotation \<^binding>\bold\ "textbf"); in end; (* index entries *) local val index_like = Parse.$$$ "(" |-- Parse.!!! (Parse.$$$ "is" |-- Args.name --| Parse.$$$ ")"); val index_args = Parse.enum1 "!" 
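(* Usage sketch for the document antiquotations set up above:

     text ‹An important@{footnote ‹but easily forgotten›} point.›
     text ‹We write @{emph ‹monotone›} for such maps.›

   footnote/emph/bold wrap their cartouche body in the corresponding LaTeX
   macro; the index_args grammar below additionally allows an "(is name)"
   sort key per "!"-separated entry. *)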
(Parse.embedded_input -- Scan.option index_like); fun output_text ctxt = Document_Output.output_document ctxt {markdown = false}; fun index binding def = Document_Output.antiquotation_raw binding (Scan.lift index_args) (fn ctxt => fn args => let val _ = Context_Position.reports ctxt (maps (Document_Output.document_reports o #1) args); fun make_item (txt, opt_like) = let val text = output_text ctxt txt; val like = (case opt_like of SOME s => s | NONE => Document_Antiquotation.approx_content ctxt (Input.string_of txt)); val _ = if is_none opt_like andalso Context_Position.is_visible ctxt then writeln ("(" ^ Markup.markup Markup.keyword2 "is" ^ " " ^ quote like ^ ")" ^ Position.here (Input.pos_of txt)) else (); in {text = text, like = like} end; in Latex.index_entry {items = map make_item args, def = def} end); val _ = Theory.setup (index \<^binding>\index_ref\ false #> index \<^binding>\index_def\ true); in end; (* quasi-formal text (unchecked) *) local fun report_text ctxt text = let val pos = Input.pos_of text in Context_Position.reports ctxt [(pos, Markup.language_text (Input.is_delimited text)), (pos, Markup.raw_text)] end; fun prepare_text ctxt = Input.source_content #> #1 #> Document_Antiquotation.prepare_lines ctxt; fun text_antiquotation name = - Document_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input) + Document_Output.antiquotation_raw_embedded name (Scan.lift Parse.embedded_input) (fn ctxt => fn text => let val _ = report_text ctxt text; in prepare_text ctxt text |> Document_Output.output_source ctxt |> Document_Output.isabelle ctxt end); val theory_text_antiquotation = - Document_Output.antiquotation_raw_embedded \<^binding>\theory_text\ (Scan.lift Args.text_input) + Document_Output.antiquotation_raw_embedded \<^binding>\theory_text\ (Scan.lift Parse.embedded_input) (fn ctxt => fn text => let val keywords = Thy_Header.get_keywords' ctxt; val _ = report_text ctxt text; val _ = Input.source_explode text |> Token.tokenize keywords {strict = true} |> maps (Token.reports keywords) |> Context_Position.reports_text ctxt; in prepare_text ctxt text |> Token.explode0 keywords |> maps (Document_Output.output_token ctxt) |> Document_Output.isabelle ctxt end); val _ = Theory.setup (text_antiquotation \<^binding>\text\ #> text_antiquotation \<^binding>\cartouche\ #> theory_text_antiquotation); in end; (* goal state *) local fun goal_state name main = Document_Output.antiquotation_pretty name (Scan.succeed ()) (fn ctxt => fn () => Goal_Display.pretty_goal (Config.put Goal_Display.show_main_goal main ctxt) (#goal (Proof.goal (Toplevel.proof_of (Toplevel.presentation_state ctxt))))); val _ = Theory.setup (goal_state \<^binding>\goals\ true #> goal_state \<^binding>\subgoals\ false); in end; (* embedded lemma *) val _ = Theory.setup (Document_Antiquotation.setup \<^binding>\lemma\ (Scan.lift (Scan.ahead Parse.not_eof) -- Args.prop -- Scan.lift Method.parse_by) (fn {context = ctxt, source = src, argument = ((prop_tok, prop), (methods, reports))} => let val _ = Context_Position.reports ctxt reports; (* FIXME check proof!? 
*)
        val _ = ctxt
          |> Proof.theorem NONE (K I) [[(prop, [])]]
          |> Proof.global_terminal_proof methods;
      in
        Document_Output.pretty_source ctxt {embedded = false}
          [hd src, prop_tok] (Document_Output.pretty_term ctxt prop)
      end));


(* verbatim text *)

val _ =
  Theory.setup
-   (Document_Output.antiquotation_verbatim_embedded \<^binding>‹verbatim› (Scan.lift Args.text_input)
+   (Document_Output.antiquotation_verbatim_embedded \<^binding>‹verbatim› (Scan.lift Parse.embedded_input)
      (fn ctxt => fn text =>
        let
          val pos = Input.pos_of text;
          val _ =
            Context_Position.reports ctxt
              [(pos, Markup.language_verbatim (Input.is_delimited text)), (pos, Markup.raw_text)];
        in #1 (Input.source_content text) end));


(* bash functions *)

val _ =
  Theory.setup
   (Document_Output.antiquotation_verbatim_embedded \<^binding>‹bash_function›
     (Scan.lift Parse.embedded_position) Isabelle_System.check_bash_function);


(* system options *)

val _ =
  Theory.setup
   (Document_Output.antiquotation_verbatim_embedded \<^binding>‹system_option›
     (Scan.lift Parse.embedded_position)
     (fn ctxt => fn (name, pos) =>
       let val _ = Completion.check_option (Options.default ()) ctxt (name, pos);
       in name end));


(* ML text *)

local

fun test_val (ml1, []) = ML_Lex.read "fn _ => (" @ ml1 @ ML_Lex.read ");"
  | test_val (ml1, ml2) = ML_Lex.read "fn _ => (" @ ml1 @ ML_Lex.read " : " @ ml2 @ ML_Lex.read ");";

fun test_op (ml1, ml2) = test_val (ML_Lex.read "op " @ ml1, ml2);

fun test_type (ml1, []) = ML_Lex.read "val _ = NONE : (" @ ml1 @ ML_Lex.read ") option;"
  | test_type (ml1, ml2) =
      ML_Lex.read "val _ = [NONE : (" @ ml1 @ ML_Lex.read ") option, NONE : (" @
        ml2 @ ML_Lex.read ") option];";

fun test_exn (ml1, []) = ML_Lex.read "fn _ => (" @ ml1 @ ML_Lex.read " : exn);"
  | test_exn (ml1, ml2) =
      ML_Lex.read "fn _ => (" @ ml1 @ ML_Lex.read " : " @ ml2 @ ML_Lex.read " -> exn);";

fun test_struct (ml, _) =
  ML_Lex.read "functor XXX() = struct structure XX = " @ ml @ ML_Lex.read " end;";

fun test_functor (Antiquote.Text tok :: _, _) =
      ML_Lex.read "ML_Env.check_functor " @
      ML_Lex.read (ML_Syntax.print_string (ML_Lex.content_of tok))
  | test_functor _ = raise Fail "Bad ML functor specification";

-val parse_ml0 = Args.text_input >> (fn source => ("", (source, Input.empty)));
+val parse_ml0 = Parse.embedded_input >> (fn source => ("", (source, Input.empty)));

val parse_ml =
-  Args.text_input -- Scan.optional (Args.colon |-- Args.text_input) Input.empty >> pair "";
+  Parse.embedded_input -- Scan.optional (Args.colon |-- Parse.embedded_input) Input.empty >> pair "";

val parse_exn =
-  Args.text_input -- Scan.optional (Args.$$$ "of" |-- Args.text_input) Input.empty >> pair "";
+  Parse.embedded_input -- Scan.optional (Args.$$$ "of" |-- Parse.embedded_input) Input.empty >> pair "";

val parse_type =
  (Parse.type_args >> (fn [] => "" | [a] => a ^ " " | bs => enclose "(" ") " (commas bs))) --
-  (Args.text_input -- Scan.optional (Args.$$$ "=" |-- Args.text_input) Input.empty);
+  (Parse.embedded_input -- Scan.optional (Args.$$$ "=" |-- Parse.embedded_input) Input.empty);

fun eval ctxt pos ml =
  ML_Context.eval_in (SOME ctxt) ML_Compiler.flags pos ml
    handle ERROR msg => error (msg ^ Position.here pos);

fun make_text sep sources =
  let
    val (txt1, txt2) = apply2 (#1 o Input.source_content) sources;
    val is_ident =
      (case try List.last (Symbol.explode txt1) of
        NONE => false
      | SOME s => Symbol.is_ascii_letdig s);
    val txt =
      if txt2 = "" then txt1
      else if sep = ":" andalso is_ident then txt1 ^ ": " ^ txt2
      else txt1 ^ " " ^ sep ^ " " ^ txt2
  in (txt, txt1) end;

fun antiquotation_ml parse test kind show_kind
binding index = Document_Output.antiquotation_raw binding (Scan.lift parse) (fn ctxt => fn (txt0, sources) => let val (ml1, ml2) = apply2 ML_Lex.read_source sources; val ml0 = ML_Lex.read_source (Input.string txt0); val _ = test (ml0 @ ml1, ml2) |> eval ctxt (Input.pos_of (#1 sources)); val sep = if kind = "type" then "=" else if kind = "exception" then "of" else ":"; val (txt, idx) = make_text sep sources; val main_text = Document_Output.verbatim ctxt ((if kind = "" orelse not show_kind then "" else kind ^ " ") ^ txt0 ^ txt); val index_text = (case index of NONE => [] | SOME def => let val ctxt' = Config.put Document_Antiquotation.thy_output_display false ctxt; val kind' = if kind = "" then " (ML)" else " (ML " ^ kind ^ ")"; val txt' = Document_Output.verbatim ctxt' idx @ Latex.string kind'; val like = Document_Antiquotation.approx_content ctxt' idx; in Latex.index_entry {items = [{text = txt', like = like}], def = def} end); in index_text @ main_text end); fun antiquotation_ml0 test kind = antiquotation_ml parse_ml0 test kind false; fun antiquotation_ml1 parse test kind binding = antiquotation_ml parse test kind true binding (SOME true); in val _ = Theory.setup (Latex.index_variants (antiquotation_ml0 test_val "") \<^binding>\ML\ #> Latex.index_variants (antiquotation_ml0 test_op "infix") \<^binding>\ML_infix\ #> Latex.index_variants (antiquotation_ml0 test_type "type") \<^binding>\ML_type\ #> Latex.index_variants (antiquotation_ml0 test_struct "structure") \<^binding>\ML_structure\ #> Latex.index_variants (antiquotation_ml0 test_functor "functor") \<^binding>\ML_functor\ #> antiquotation_ml0 (K []) "text" \<^binding>\ML_text\ NONE #> antiquotation_ml1 parse_ml test_val "" \<^binding>\define_ML\ #> antiquotation_ml1 parse_ml test_op "infix" \<^binding>\define_ML_infix\ #> antiquotation_ml1 parse_type test_type "type" \<^binding>\define_ML_type\ #> antiquotation_ml1 parse_exn test_exn "exception" \<^binding>\define_ML_exception\ #> antiquotation_ml1 parse_ml0 test_struct "structure" \<^binding>\define_ML_structure\ #> antiquotation_ml1 parse_ml0 test_functor "functor" \<^binding>\define_ML_functor\); end; (* URLs *) val escape_url = translate_string (fn c => if c = "%" orelse c = "#" orelse c = "^" then "\\" ^ c else c); val _ = Theory.setup (Document_Output.antiquotation_raw_embedded \<^binding>\url\ (Scan.lift Parse.embedded_input) (fn ctxt => fn source => let val url = Input.string_of source; val pos = Input.pos_of source; val delimited = Input.is_delimited source; val _ = Context_Position.reports ctxt [(pos, Markup.language_url delimited), (pos, Markup.url url)]; in Latex.macro "url" (Latex.string (escape_url url)) end)); (* formal entities *) local fun entity_antiquotation name check macro = Document_Output.antiquotation_raw name (Scan.lift Args.name_position) (fn ctxt => fn (name, pos) => let val _ = check ctxt (name, pos) in Latex.macro macro (Latex.string (Output.output name)) end); val _ = Theory.setup (entity_antiquotation \<^binding>\command\ Outer_Syntax.check_command "isacommand" #> entity_antiquotation \<^binding>\method\ Method.check_name "isa" #> entity_antiquotation \<^binding>\attribute\ Attrib.check_name "isa"); in end; end; diff --git a/src/Pure/Thy/document_output.ML b/src/Pure/Thy/document_output.ML --- a/src/Pure/Thy/document_output.ML +++ b/src/Pure/Thy/document_output.ML @@ -1,594 +1,593 @@ (* Title: Pure/Thy/document_output.ML Author: Makarius Theory document output. 
*) signature DOCUMENT_OUTPUT = sig val document_reports: Input.source -> Position.report list val output_document: Proof.context -> {markdown: bool} -> Input.source -> Latex.text val document_output: {markdown: bool, markup: Latex.text -> Latex.text} -> (xstring * Position.T) option * Input.source -> Toplevel.transition -> Toplevel.transition val check_comments: Proof.context -> Symbol_Pos.T list -> unit val output_token: Proof.context -> Token.T -> Latex.text val output_source: Proof.context -> string -> Latex.text type segment = {span: Command_Span.span, command: Toplevel.transition, prev_state: Toplevel.state, state: Toplevel.state} val present_thy: Options.T -> theory -> segment list -> Latex.text val pretty_term: Proof.context -> term -> Pretty.T val pretty_thm: Proof.context -> thm -> Pretty.T val isabelle: Proof.context -> Latex.text -> Latex.text val isabelle_typewriter: Proof.context -> Latex.text -> Latex.text val typewriter: Proof.context -> string -> Latex.text val verbatim: Proof.context -> string -> Latex.text val source: Proof.context -> {embedded: bool} -> Token.src -> Latex.text val pretty: Proof.context -> Pretty.T -> Latex.text val pretty_source: Proof.context -> {embedded: bool} -> Token.src -> Pretty.T -> Latex.text val pretty_items: Proof.context -> Pretty.T list -> Latex.text val pretty_items_source: Proof.context -> {embedded: bool} -> Token.src -> Pretty.T list -> Latex.text val antiquotation_pretty: binding -> 'a context_parser -> (Proof.context -> 'a -> Pretty.T) -> theory -> theory val antiquotation_pretty_embedded: binding -> 'a context_parser -> (Proof.context -> 'a -> Pretty.T) -> theory -> theory val antiquotation_pretty_source: binding -> 'a context_parser -> (Proof.context -> 'a -> Pretty.T) -> theory -> theory val antiquotation_pretty_source_embedded: binding -> 'a context_parser -> (Proof.context -> 'a -> Pretty.T) -> theory -> theory val antiquotation_raw: binding -> 'a context_parser -> (Proof.context -> 'a -> Latex.text) -> theory -> theory val antiquotation_raw_embedded: binding -> 'a context_parser -> (Proof.context -> 'a -> Latex.text) -> theory -> theory val antiquotation_verbatim: binding -> 'a context_parser -> (Proof.context -> 'a -> string) -> theory -> theory val antiquotation_verbatim_embedded: binding -> 'a context_parser -> (Proof.context -> 'a -> string) -> theory -> theory end; structure Document_Output: DOCUMENT_OUTPUT = struct (* output document source *) fun document_reports txt = let val pos = Input.pos_of txt in [(pos, Markup.language_document (Input.is_delimited txt)), (pos, Markup.plain_text)] end; fun output_comment ctxt (kind, syms) = (case kind of Comment.Comment => Input.cartouche_content syms |> output_document (ctxt |> Config.put Document_Antiquotation.thy_output_display false) {markdown = false} |> XML.enclose "%\n\\isamarkupcmt{" "%\n}" | Comment.Cancel => Symbol_Pos.cartouche_content syms |> Latex.symbols_output |> XML.enclose "%\n\\isamarkupcancel{" "}" | Comment.Latex => Latex.symbols (Symbol_Pos.cartouche_content syms) | Comment.Marker => []) and output_comment_document ctxt (comment, syms) = (case comment of SOME kind => output_comment ctxt (kind, syms) | NONE => Latex.symbols syms) and output_document_text ctxt syms = Comment.read_body syms |> maps (output_comment_document ctxt) and output_document ctxt {markdown} source = let val pos = Input.pos_of source; val syms = Input.source_explode source; val output_antiquotes = maps (Document_Antiquotation.evaluate (output_document_text ctxt) ctxt); fun output_line line = 
(if Markdown.line_is_item line then Latex.string "\\item " else []) @ output_antiquotes (Markdown.line_content line); fun output_block (Markdown.Par lines) = separate (XML.Text "\n") (map (Latex.block o output_line) lines) | output_block (Markdown.List {kind, body, ...}) = - Latex.environment_text (Markdown.print_kind kind) (output_blocks body) + Latex.environment (Markdown.print_kind kind) (output_blocks body) and output_blocks blocks = separate (XML.Text "\n\n") (map (Latex.block o output_block) blocks); in if Toplevel.is_skipped_proof (Toplevel.presentation_state ctxt) then [] else if markdown andalso exists (Markdown.is_control o Symbol_Pos.symbol) syms then let val ants = Antiquote.parse_comments pos syms; val reports = Antiquote.antiq_reports ants; val blocks = Markdown.read_antiquotes ants; val _ = Context_Position.reports ctxt (reports @ Markdown.reports blocks); in output_blocks blocks end else let val ants = Antiquote.parse_comments pos (trim (Symbol.is_blank o Symbol_Pos.symbol) syms); val reports = Antiquote.antiq_reports ants; val _ = Context_Position.reports ctxt (reports @ Markdown.text_reports ants); in output_antiquotes ants end end; fun document_output {markdown, markup} (loc, txt) = let fun output st = let val ctxt = Toplevel.presentation_context st; val _ = Context_Position.reports ctxt (document_reports txt); in txt |> output_document ctxt {markdown = markdown} |> markup end; in Toplevel.present (fn st => (case loc of NONE => output st | SOME (_, pos) => error ("Illegal target specification -- not a theory context" ^ Position.here pos))) o Toplevel.present_local_theory loc output end; (* output tokens with formal comments *) local val output_symbols_antiq = (fn Antiquote.Text syms => Latex.symbols_output syms | Antiquote.Control {name = (name, _), body, ...} => Latex.string (Latex.output_symbols [Symbol.encode (Symbol.Control name)]) @ Latex.symbols_output body | Antiquote.Antiq {body, ...} => XML.enclose "%\n\\isaantiq\n" "{}%\n\\endisaantiq\n" (Latex.symbols_output body)); fun output_comment_symbols ctxt {antiq} (comment, syms) = (case (comment, antiq) of (NONE, false) => Latex.symbols_output syms | (NONE, true) => Antiquote.parse_comments (#1 (Symbol_Pos.range syms)) syms |> maps output_symbols_antiq | (SOME comment, _) => output_comment ctxt (comment, syms)); fun output_body ctxt antiq bg en syms = Comment.read_body syms |> maps (output_comment_symbols ctxt {antiq = antiq}) |> XML.enclose bg en; in fun output_token ctxt tok = let fun output antiq bg en = output_body ctxt antiq bg en (Input.source_explode (Token.input_of tok)); in (case Token.kind_of tok of Token.Comment NONE => [] | Token.Comment (SOME Comment.Marker) => [] | Token.Command => output false "\\isacommand{" "}" | Token.Keyword => if Symbol.is_ascii_identifier (Token.content_of tok) then output false "\\isakeyword{" "}" else output false "" "" | Token.String => output false "{\\isachardoublequoteopen}" "{\\isachardoublequoteclose}" | Token.Alt_String => output false "{\\isacharbackquoteopen}" "{\\isacharbackquoteclose}" - | Token.Verbatim => output true "{\\isacharverbatimopen}" "{\\isacharverbatimclose}" | Token.Cartouche => output false "{\\isacartoucheopen}" "{\\isacartoucheclose}" | Token.Control control => output_body ctxt false "" "" (Antiquote.control_symbols control) | _ => output false "" "") end handle ERROR msg => error (msg ^ Position.here (Token.pos_of tok)); fun output_source ctxt s = output_body ctxt false "" "" (Symbol_Pos.explode (s, Position.none)); fun check_comments ctxt = 
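(* Illustration of output_token above: each outer-syntax token kind is
   wrapped in its LaTeX form, schematically

     Token.Command    ~>  \isacommand{lemma}
     Token.Keyword    ~>  \isakeyword{assumes}
     Token.String     ~>  {\isachardoublequoteopen}...{\isachardoublequoteclose}
     Token.Cartouche  ~>  {\isacartoucheopen}...{\isacartoucheclose}

   Comments and markers produce no output; antiquotations inside tokens are
   handled by output_body/output_comment_symbols. *)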
Comment.read_body #> List.app (fn (comment, syms) => let val pos = #1 (Symbol_Pos.range syms); val _ = comment |> Option.app (fn kind => Context_Position.reports ctxt (map (pair pos) (Comment.kind_markups kind))); val _ = output_comment_symbols ctxt {antiq = false} (comment, syms); in if comment = SOME Comment.Comment then check_comments ctxt syms else () end); end; (** present theory source **) (* presentation tokens *) datatype token = Ignore | Token of Token.T | Output of Latex.text; fun token_with pred (Token tok) = pred tok | token_with _ _ = false; val white_token = token_with Document_Source.is_white; val white_comment_token = token_with Document_Source.is_white_comment; val blank_token = token_with Token.is_blank; val newline_token = token_with Token.is_newline; fun present_token ctxt tok = (case tok of Ignore => [] | Token tok => output_token ctxt tok | Output output => output); (* command spans *) type command = string * Position.T; (*name, position*) type source = (token * (string * int)) list; (*token, markup flag, meta-comment depth*) datatype span = Span of command * (source * source * source * source) * bool; fun make_span cmd src = let fun chop_newline (tok :: toks) = if newline_token (fst tok) then ([tok], toks, true) else ([], tok :: toks, false) | chop_newline [] = ([], [], false); val (((src_prefix, src_main), src_suffix1), (src_suffix2, src_appendix, newline)) = src |> chop_prefix (white_token o fst) ||>> chop_suffix (white_token o fst) ||>> chop_prefix (white_comment_token o fst) ||> chop_newline; in Span (cmd, (src_prefix, src_main, src_suffix1 @ src_suffix2, src_appendix), newline) end; (* present spans *) local fun err_bad_nesting here = error ("Bad nesting of commands in presentation" ^ here); fun edge which f (x: string option, y) = if x = y then I else (case which (x, y) of NONE => I | SOME txt => fold cons (Latex.string (f txt))); val markup_tag = YXML.output_markup o Markup.latex_tag; val markup_delim = YXML.output_markup o Markup.latex_delim; val bg_delim = #1 o markup_delim; val en_delim = #2 o markup_delim; val begin_tag = edge #2 (#1 o markup_tag); val end_tag = edge #1 (#2 o markup_tag); fun open_delim delim e = edge #2 bg_delim e #> delim #> edge #2 en_delim e; fun close_delim delim e = edge #1 bg_delim e #> delim #> edge #1 en_delim e; fun document_tag cmd_pos state state' tagging_stack = let val ctxt' = Toplevel.presentation_context state'; val nesting = Toplevel.level state' - Toplevel.level state; val (tagging, taggings) = tagging_stack; val (tag', tagging') = Document_Source.update_tagging ctxt' tagging; val tagging_stack' = if nesting = 0 andalso not (Toplevel.is_proof state) then tagging_stack else if nesting >= 0 then (tagging', replicate nesting tagging @ taggings) else (case drop (~ nesting - 1) taggings of tg :: tgs => (tg, tgs) | [] => err_bad_nesting (Position.here cmd_pos)); in (tag', tagging_stack') end; fun read_tag s = (case space_explode "%" s of ["", b] => (SOME b, NONE) | [a, b] => (NONE, SOME (a, b)) | _ => error ("Bad document_tags specification: " ^ quote s)); in fun make_command_tag options keywords = let val document_tags = map read_tag (space_explode "," (Options.string options \<^system_option>\document_tags\)); val document_tags_default = map_filter #1 document_tags; val document_tags_command = map_filter #2 document_tags; in fn cmd_name => fn state => fn state' => fn active_tag => let val keyword_tags = if cmd_name = "end" andalso Toplevel.is_end_theory state' then ["theory"] else Keyword.command_tags keywords cmd_name; val 
command_tags = the_list (AList.lookup (op =) document_tags_command cmd_name) @ keyword_tags @ document_tags_default; val active_tag' = (case command_tags of default_tag :: _ => SOME default_tag | [] => if Keyword.is_vacuous keywords cmd_name andalso Toplevel.is_proof state then active_tag else NONE); in active_tag' end end; fun present_span command_tag span state state' (tagging_stack, active_tag, newline, latex, present_cont) = let val ctxt' = Toplevel.presentation_context state'; val present = fold (fn (tok, (flag, 0)) => fold cons (present_token ctxt' tok) #> fold cons (Latex.string flag) | _ => I); val Span ((cmd_name, cmd_pos), srcs, span_newline) = span; val (tag', tagging_stack') = document_tag cmd_pos state state' tagging_stack; val active_tag' = if is_some tag' then Option.map #1 tag' else command_tag cmd_name state state' active_tag; val edge = (active_tag, active_tag'); val newline' = if is_none active_tag' then span_newline else newline; val latex' = latex |> end_tag edge |> close_delim (fst present_cont) edge |> snd present_cont |> open_delim (present (#1 srcs)) edge |> begin_tag edge |> present (#2 srcs); val present_cont' = if newline then (present (#3 srcs), present (#4 srcs)) else (I, present (#3 srcs) #> present (#4 srcs)); in (tagging_stack', active_tag', newline', latex', present_cont') end; fun present_trailer ((_, tags), active_tag, _, latex, present_cont) = if not (null tags) then err_bad_nesting " at end of theory" else latex |> end_tag (active_tag, NONE) |> close_delim (fst present_cont) (active_tag, NONE) |> snd present_cont; end; (* present_thy *) type segment = {span: Command_Span.span, command: Toplevel.transition, prev_state: Toplevel.state, state: Toplevel.state}; local val markup_true = "\\isamarkuptrue%\n"; val markup_false = "\\isamarkupfalse%\n"; fun command_output output tok = if Token.is_command tok then SOME (Token.put_output output tok) else NONE; fun segment_content (segment: segment) = let val toks = Command_Span.content (#span segment) in (case Toplevel.output_of (#state segment) of NONE => toks | SOME output => map_filter (command_output output) toks) end; fun output_command keywords = Scan.some (fn tok => if Token.is_command tok then let val name = Token.content_of tok; val is_document = Keyword.is_document keywords name; val is_document_raw = Keyword.is_document_raw keywords name; val flag = if is_document andalso not is_document_raw then markup_true else ""; in if is_document andalso is_some (Token.get_output tok) then SOME ((name, Token.pos_of tok), the (Token.get_output tok), flag) else NONE end else NONE); val opt_newline = Scan.option (Scan.one Token.is_newline); val ignore = Scan.depend (fn d => opt_newline |-- Scan.one Token.is_begin_ignore >> pair (d + 1)) || Scan.depend (fn d => Scan.one Token.is_end_ignore --| (if d = 0 then Scan.fail_with (K (fn () => "Bad nesting of meta-comments")) else opt_newline) >> pair (d - 1)); in fun present_thy options thy (segments: segment list) = let val keywords = Thy_Header.get_keywords thy; (* tokens *) val ignored = Scan.state --| ignore >> (fn d => [(NONE, (Ignore, ("", d)))]); val output = Scan.peek (fn d => Document_Source.improper |-- output_command keywords --| Document_Source.improper_end >> (fn (kind, body, flag) => [(SOME kind, (Output body, (flag, d)))])); val command = Scan.peek (fn d => Scan.optional (Scan.one Token.is_command_modifier ::: Document_Source.improper) [] -- Scan.one Token.is_command --| Document_Source.annotation >> (fn (cmd_mod, cmd) => map (fn tok => (NONE, (Token tok, ("", 
d)))) cmd_mod @ [(SOME (Token.content_of cmd, Token.pos_of cmd), (Token cmd, (markup_false, d)))])); val cmt = Scan.peek (fn d => Scan.one Document_Source.is_black_comment >> (fn tok => [(NONE, (Token tok, ("", d)))])); val other = Scan.peek (fn d => Parse.not_eof >> (fn tok => [(NONE, (Token tok, ("", d)))])); val tokens = ignored || output || command || cmt || other; (* spans *) val is_eof = fn (_, (Token x, _)) => Token.is_eof x | _ => false; val stopper = Scan.stopper (K (NONE, (Token Token.eof, ("", 0)))) is_eof; val cmd = Scan.one (is_some o fst); val non_cmd = Scan.one (is_none o fst andf not o is_eof) >> #2; val white_comments = Scan.many (white_comment_token o fst o snd); val blank = Scan.one (blank_token o fst o snd); val newline = Scan.one (newline_token o fst o snd); val before_cmd = Scan.option (newline -- white_comments) -- Scan.option (newline -- white_comments) -- Scan.option (blank -- white_comments) -- cmd; val span = Scan.repeat non_cmd -- cmd -- Scan.repeat (Scan.unless before_cmd non_cmd) -- Scan.option (newline >> (single o snd)) >> (fn (((toks1, (cmd, tok2)), toks3), tok4) => make_span (the cmd) (toks1 @ (tok2 :: (toks3 @ the_default [] tok4)))); val spans = segments |> maps segment_content |> drop_suffix Token.is_space |> Source.of_list |> Source.source' 0 Token.stopper (Scan.error (Scan.bulk tokens >> flat)) |> Source.source stopper (Scan.error (Scan.bulk span)) |> Source.exhaust; val command_results = segments |> map_filter (fn {command, state, ...} => if Toplevel.is_ignored command then NONE else SOME (command, state)); (* present commands *) val command_tag = make_command_tag options keywords; fun present_command tr span st st' = Toplevel.setmp_thread_position tr (present_span command_tag span st st'); fun present _ [] = I | present st ((span, (tr, st')) :: rest) = present_command tr span st st' #> present st' rest; in if length command_results = length spans then (([], []), NONE, true, [], (I, I)) |> present (Toplevel.init_toplevel ()) (spans ~~ command_results) |> present_trailer |> rev else error "Messed-up outer syntax for presentation" end; end; (** standard output operations **) (* pretty printing *) fun pretty_term ctxt t = Syntax.pretty_term (Proof_Context.augment t ctxt) t; fun pretty_thm ctxt = pretty_term ctxt o Thm.full_prop_of; (* default output *) fun isabelle ctxt body = if Config.get ctxt Document_Antiquotation.thy_output_display then Latex.environment "isabelle" body else Latex.macro "isa" body; fun isabelle_typewriter ctxt body = if Config.get ctxt Document_Antiquotation.thy_output_display then Latex.environment "isabellett" body else Latex.macro "isatt" body; fun typewriter ctxt s = isabelle_typewriter ctxt (Latex.string (Latex.output_ascii s)); fun verbatim ctxt = if Config.get ctxt Document_Antiquotation.thy_output_display then Document_Antiquotation.indent_lines ctxt #> typewriter ctxt else split_lines #> map (typewriter ctxt #> Latex.block) #> separate (XML.Text "\\isanewline%\n"); fun token_source ctxt {embedded} tok = if Token.is_kind Token.Cartouche tok andalso embedded andalso not (Config.get ctxt Document_Antiquotation.thy_output_source_cartouche) then Token.content_of tok else Token.unparse tok; fun is_source ctxt = Config.get ctxt Document_Antiquotation.thy_output_source orelse Config.get ctxt Document_Antiquotation.thy_output_source_cartouche; fun source ctxt embedded = Token.args_of_src #> map (token_source ctxt embedded #> Document_Antiquotation.prepare_lines ctxt) #> space_implode " " #> output_source ctxt #> isabelle ctxt; fun 
pretty ctxt = Document_Antiquotation.output ctxt #> Latex.string #> isabelle ctxt; fun pretty_source ctxt embedded src prt = if is_source ctxt then source ctxt embedded src else pretty ctxt prt; fun pretty_items ctxt = map (Document_Antiquotation.output ctxt #> XML.Text) #> separate (XML.Text "\\isasep\\isanewline%\n") #> isabelle ctxt; fun pretty_items_source ctxt embedded src prts = if is_source ctxt then source ctxt embedded src else pretty_items ctxt prts; (* antiquotation variants *) local fun gen_setup name embedded = if embedded then Document_Antiquotation.setup_embedded name else Document_Antiquotation.setup name; fun gen_antiquotation_pretty name embedded scan f = gen_setup name embedded scan (fn {context = ctxt, argument = x, ...} => pretty ctxt (f ctxt x)); fun gen_antiquotation_pretty_source name embedded scan f = gen_setup name embedded scan (fn {context = ctxt, source = src, argument = x} => pretty_source ctxt {embedded = embedded} src (f ctxt x)); fun gen_antiquotation_raw name embedded scan f = gen_setup name embedded scan (fn {context = ctxt, argument = x, ...} => f ctxt x); fun gen_antiquotation_verbatim name embedded scan f = gen_antiquotation_raw name embedded scan (fn ctxt => verbatim ctxt o f ctxt); in fun antiquotation_pretty name = gen_antiquotation_pretty name false; fun antiquotation_pretty_embedded name = gen_antiquotation_pretty name true; fun antiquotation_pretty_source name = gen_antiquotation_pretty_source name false; fun antiquotation_pretty_source_embedded name = gen_antiquotation_pretty_source name true; fun antiquotation_raw name = gen_antiquotation_raw name false; fun antiquotation_raw_embedded name = gen_antiquotation_raw name true; fun antiquotation_verbatim name = gen_antiquotation_verbatim name false; fun antiquotation_verbatim_embedded name = gen_antiquotation_verbatim name true; end; end; diff --git a/src/Pure/Thy/latex.ML b/src/Pure/Thy/latex.ML --- a/src/Pure/Thy/latex.ML +++ b/src/Pure/Thy/latex.ML @@ -1,251 +1,246 @@ (* Title: Pure/Thy/latex.ML Author: Makarius LaTeX output of theory sources. 
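For orientation, a minimal sketch of the representation defined below
(illustrative only, in terms of this module's own constructors): LaTeX
output is a "text" value, i.e. a body of XML markup trees, rendered to
concrete LaTeX only at the very end of document presentation.  For
example:

  Latex.string "x"                      (*yields [XML.Text "x"]*)
  Latex.macro "isa" (Latex.string "x")  (*latex_macro element, printed as \isa{x}*)
  Latex.environment "isabelle" body     (*printed as \begin{isabelle}...\end{isabelle}*)

The former string-based operations enclose_text and environment_text are
discontinued in favour of XML.enclose and the markup-based environment
above.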
*) signature LATEX = sig type text = XML.body val text: string * Position.T -> text val string: string -> text val block: text -> XML.tree val output: text -> text val macro0: string -> text val macro: string -> text -> text val environment: string -> text -> text - val enclose_text: string -> string -> text -> text - val output_name: string -> string val output_ascii: string -> string val output_ascii_breakable: string -> string -> string val output_symbols: Symbol.symbol list -> string val output_syms: string -> string val symbols: Symbol_Pos.T list -> text val symbols_output: Symbol_Pos.T list -> text - val environment_text: string -> text -> text val isabelle_body: string -> text -> text val theory_entry: string -> string type index_item = {text: text, like: string} type index_entry = {items: index_item list, def: bool} val index_entry: index_entry -> text val index_variants: (binding -> bool option -> 'a -> 'a) -> binding -> 'a -> 'a val latexN: string - val latex_output: string -> string * int val latex_markup: string * Properties.T -> Markup.output - val latex_indent: string -> int -> string end; structure Latex: LATEX = struct (* text with positions *) type text = XML.body; fun text (s, pos) = if s = "" then [] else if pos = Position.none then [XML.Text s] else [XML.Elem (Position.markup pos Markup.document_latex, [XML.Text s])]; fun string s = text (s, Position.none); fun block body = XML.Elem (Markup.document_latex, body); fun output body = [XML.Elem (Markup.latex_output, body)]; fun macro0 name = [XML.Elem (Markup.latex_macro0 name, [])]; fun macro name body = [XML.Elem (Markup.latex_macro name, body)]; fun environment name body = [XML.Elem (Markup.latex_environment name, body)]; -fun enclose_text bg en body = string bg @ body @ string en; - (* output name for LaTeX macros *) val output_name = translate_string (fn "_" => "UNDERSCORE" | "'" => "PRIME" | "0" => "ZERO" | "1" => "ONE" | "2" => "TWO" | "3" => "THREE" | "4" => "FOUR" | "5" => "FIVE" | "6" => "SIX" | "7" => "SEVEN" | "8" => "EIGHT" | "9" => "NINE" | s => s); fun enclose_name bg en = enclose bg en o output_name; (* output verbatim ASCII *) val output_ascii = translate_string (fn " " => "\\ " | "\t" => "\\ " | "\n" => "\\isanewline\n" | s => s |> exists_string (fn s' => s = s') "\"#$%&',-<>\\^_`{}~" ? 
enclose "{\\char`\\" "}" |> suffix "{\\kern0pt}"); fun output_ascii_breakable sep = space_explode sep #> map output_ascii #> space_implode (output_ascii sep ^ "\\discretionary{}{}{}"); (* output symbols *) local val char_table = Symtab.make [("\007", "{\\isacharbell}"), ("!", "{\\isacharbang}"), ("\"", "{\\isachardoublequote}"), ("#", "{\\isacharhash}"), ("$", "{\\isachardollar}"), ("%", "{\\isacharpercent}"), ("&", "{\\isacharampersand}"), ("'", "{\\isacharprime}"), ("(", "{\\isacharparenleft}"), (")", "{\\isacharparenright}"), ("*", "{\\isacharasterisk}"), ("+", "{\\isacharplus}"), (",", "{\\isacharcomma}"), ("-", "{\\isacharminus}"), (".", "{\\isachardot}"), ("/", "{\\isacharslash}"), (":", "{\\isacharcolon}"), (";", "{\\isacharsemicolon}"), ("<", "{\\isacharless}"), ("=", "{\\isacharequal}"), (">", "{\\isachargreater}"), ("?", "{\\isacharquery}"), ("@", "{\\isacharat}"), ("[", "{\\isacharbrackleft}"), ("\\", "{\\isacharbackslash}"), ("]", "{\\isacharbrackright}"), ("^", "{\\isacharcircum}"), ("_", "{\\isacharunderscore}"), ("`", "{\\isacharbackquote}"), ("{", "{\\isacharbraceleft}"), ("|", "{\\isacharbar}"), ("}", "{\\isacharbraceright}"), ("~", "{\\isachartilde}")]; fun output_chr " " = "\\ " | output_chr "\t" = "\\ " | output_chr "\n" = "\\isanewline\n" | output_chr c = (case Symtab.lookup char_table c of SOME s => s ^ "{\\kern0pt}" | NONE => if Symbol.is_ascii_digit c then enclose "{\\isadigit{" "}}" c else c); fun output_sym sym = (case Symbol.decode sym of Symbol.Char s => output_chr s | Symbol.UTF8 s => s | Symbol.Sym s => enclose_name "{\\isasym" "}" s | Symbol.Control s => enclose_name "\\isactrl" " " s | Symbol.Malformed s => error (Symbol.malformed_msg s) | Symbol.EOF => error "Bad EOF symbol"); open Basic_Symbol_Pos; val scan_latex_length = Scan.many1 (fn (s, _) => s <> Symbol.latex andalso Symbol.not_eof s) >> (Symbol.length o map Symbol_Pos.symbol) || $$ Symbol.latex -- Scan.option (Scan.permissive Symbol_Pos.scan_cartouche "") >> K 0; val scan_latex = $$ Symbol.latex |-- Symbol_Pos.scan_cartouche_content "Embedded LaTeX: " >> (implode o map Symbol_Pos.symbol) || Scan.one (Symbol.not_eof o Symbol_Pos.symbol) >> (output_sym o Symbol_Pos.symbol); fun read scan syms = Scan.read Symbol_Pos.stopper (Scan.repeat scan) (map (rpair Position.none) syms); in fun length_symbols syms = fold Integer.add (these (read scan_latex_length syms)) 0; fun output_symbols syms = if member (op =) syms Symbol.latex then (case read scan_latex syms of SOME ss => implode ss | NONE => error ("Malformed embedded LaTeX: " ^ quote (Symbol.beginning 10 syms))) else implode (map output_sym syms); val output_syms = output_symbols o Symbol.explode; end; fun symbols syms = text (Symbol_Pos.content syms, #1 (Symbol_Pos.range syms)); fun symbols_output syms = text (output_symbols (map Symbol_Pos.symbol syms), #1 (Symbol_Pos.range syms)); (* theory presentation *) -fun environment_text name = - enclose_text - ("%\n\\begin{" ^ output_name name ^ "}%\n") - ("%\n\\end{" ^ output_name name ^ "}"); - fun isabelle_body name = - enclose_text + XML.enclose ("%\n\\begin{isabellebody}%\n\\setisabellecontext{" ^ output_syms name ^ "}%\n") "%\n\\end{isabellebody}%\n"; fun theory_entry name = "\\input{" ^ name ^ ".tex}\n\n"; (* index entries *) type index_item = {text: text, like: string}; type index_entry = {items: index_item list, def: bool}; fun index_item (item: index_item) = XML.wrap_elem ((Markup.latex_index_item, #text item), XML.string (#like item)); fun index_entry (entry: index_entry) = [XML.Elem 
(Markup.latex_index_entry (if #def entry then "isaindexdef" else "isaindexref"), map index_item (#items entry))]; fun index_binding NONE = I | index_binding (SOME def) = Binding.map_name (suffix (if def then "_def" else "_ref")); fun index_variants setup binding = fold (fn index => setup (index_binding index binding) index) [NONE, SOME true, SOME false]; (* print mode *) val latexN = "latex"; +local + fun latex_output str = let val syms = Symbol.explode str in (output_symbols syms, length_symbols syms) end; +val command_markup = YXML.output_markup (Markup.latex_macro "isacommand"); +val keyword_markup = YXML.output_markup (Markup.latex_macro "isakeyword"); +val indent_markup = YXML.output_markup (Markup.latex_macro "isaindent"); + +in + fun latex_markup (s, _: Properties.T) = - if s = Markup.commandN orelse s = Markup.keyword1N orelse s = Markup.keyword3N - then ("\\isacommand{", "}") - else if s = Markup.keyword2N - then ("\\isakeyword{", "}") + if member (op =) [Markup.commandN, Markup.keyword1N, Markup.keyword3N] s then command_markup + else if s = Markup.keyword2N then keyword_markup else Markup.no_output; -fun latex_indent "" _ = "" - | latex_indent s _ = enclose "\\isaindent{" "}" s; - val _ = Output.add_mode latexN latex_output (prefix Symbol.latex o cartouche); val _ = Markup.add_mode latexN latex_markup; -val _ = Pretty.add_mode latexN latex_indent; + +val _ = Pretty.add_mode latexN + (fn s => fn _ => if s = "" then s else uncurry enclose indent_markup s); end; + +end; diff --git a/src/Pure/Thy/sessions.ML b/src/Pure/Thy/sessions.ML --- a/src/Pure/Thy/sessions.ML +++ b/src/Pure/Thy/sessions.ML @@ -1,130 +1,130 @@ (* Title: Pure/Thy/sessions.ML Author: Makarius Support for session ROOT syntax. *) signature SESSIONS = sig val root_name: string val theory_name: string val command_parser: (Toplevel.transition -> Toplevel.transition) parser end; structure Sessions: SESSIONS = struct val root_name = "ROOT"; val theory_name = "Pure.Sessions"; local val theory_entry = Parse.input Parse.theory_name --| Parse.opt_keyword "global"; val theories = Parse.$$$ "theories" |-- Parse.!!! (Scan.optional Parse.options [] -- Scan.repeat1 theory_entry); val in_path = Parse.$$$ "(" |-- Parse.!!! (Parse.$$$ "in" |-- Parse.path_input --| Parse.$$$ ")"); val document_theories = Parse.$$$ "document_theories" |-- Scan.repeat1 (Parse.input Parse.theory_name); val document_files = Parse.$$$ "document_files" |-- Parse.!!! (Scan.optional in_path (Input.string "document") -- Scan.repeat1 Parse.path_input); val prune = Scan.optional (Parse.$$$ "[" |-- Parse.!!! (Parse.nat --| Parse.$$$ "]")) 0; val export_files = Parse.$$$ "export_files" |-- Parse.!!! (Scan.optional in_path (Input.string "export") -- prune -- Scan.repeat1 Parse.embedded); fun path_source source path = Input.source (Input.is_delimited source) (Path.implode path) (Input.range_of source); in val command_parser = Parse.session_name -- Scan.optional (Parse.$$$ "(" |-- Parse.!!! (Scan.repeat1 Parse.name --| Parse.$$$ ")")) [] -- Scan.optional (Parse.$$$ "in" |-- Parse.!!! Parse.path_input) (Input.string ".") -- (Parse.$$$ "=" |-- Parse.!!! (Scan.option (Parse.session_name --| Parse.!!! (Parse.$$$ "+")) -- - Scan.optional (Parse.$$$ "description" |-- Parse.!!! (Parse.input Parse.text)) Input.empty -- + Scan.optional (Parse.$$$ "description" |-- Parse.!!! (Parse.input Parse.embedded)) Input.empty -- Scan.optional (Parse.$$$ "options" |-- Parse.!!! Parse.options) [] -- Scan.optional (Parse.$$$ "sessions" |-- Parse.!!! 
(Scan.repeat1 Parse.session_name)) [] -- Scan.optional (Parse.$$$ "directories" |-- Parse.!!! (Scan.repeat1 Parse.path_input)) [] -- Scan.repeat theories -- Scan.optional document_theories [] -- Scan.repeat document_files -- Scan.repeat export_files)) >> (fn (((((session, _), _), dir), (((((((((parent, descr), options), sessions), directories), theories), document_theories), document_files), export_files)))) => Toplevel.keep (fn state => let val ctxt = Toplevel.context_of state; val session_dir = Resources.check_dir ctxt NONE dir; val _ = (the_list parent @ sessions) |> List.app (fn arg => ignore (Resources.check_session ctxt arg) handle ERROR msg => Output.error_message msg); val _ = Context_Position.report ctxt (Position.range_position (Symbol_Pos.range (Input.source_explode descr))) Markup.comment; val _ = (options @ maps #1 theories) |> List.app (fn (x, y) => ignore (Completion.check_option_value ctxt x y (Options.default ())) handle ERROR msg => Output.error_message msg); fun check_thy source = ignore (Resources.check_file ctxt (SOME Path.current) source) handle ERROR msg => Output.error_message msg; val _ = maps #2 theories |> List.app (fn source => let val s = Input.string_of source; val pos = Input.pos_of source; val {node_name, theory_name, ...} = Resources.import_name session session_dir s handle ERROR msg => error (msg ^ Position.here pos); val thy_path = the_default node_name (Resources.find_theory_file theory_name); in check_thy (path_source source thy_path) end); val _ = directories |> List.app (ignore o Resources.check_dir ctxt (SOME session_dir)); val _ = document_theories |> List.app (fn source => let val thy = Input.string_of source; val pos = Input.pos_of source; in (case Resources.find_theory_file thy of NONE => Output.error_message ("Unknown theory " ^ quote thy ^ Position.here pos) | SOME path => check_thy (path_source source path)) end); val _ = document_files |> List.app (fn (doc_dir, doc_files) => let val dir = Resources.check_dir ctxt (SOME session_dir) doc_dir; val _ = List.app (ignore o Resources.check_file ctxt (SOME dir)) doc_files; in () end); val _ = export_files |> List.app (fn ((export_dir, _), _) => ignore (Resources.check_path ctxt (SOME session_dir) export_dir)); in () end)); end; end; diff --git a/src/Pure/Thy/thy_header.ML b/src/Pure/Thy/thy_header.ML --- a/src/Pure/Thy/thy_header.ML +++ b/src/Pure/Thy/thy_header.ML @@ -1,213 +1,213 @@ (* Title: Pure/Thy/thy_header.ML Author: Makarius Static theory header information. 
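For orientation, a sketch of the concrete header syntax parsed by "args"
below (illustrative only):

  theory Foo
    imports Bar Baz
    keywords "cmd" :: thy_decl
    abbrevs "==>" = "\<Longrightarrow>"
  begin

Note that abbreviations are now parsed as Parse.embedded rather than
Parse.text, in line with the removal of old-style verbatim tokens.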
*) signature THY_HEADER = sig type keywords = ((string * Position.T) * Keyword.spec) list type header = {name: string * Position.T, imports: (string * Position.T) list, keywords: keywords} val make: string * Position.T -> (string * Position.T) list -> keywords -> header val theoryN: string val bootstrap_keywords: Keyword.keywords val add_keywords: keywords -> theory -> theory val get_keywords: theory -> Keyword.keywords val get_keywords': Proof.context -> Keyword.keywords val ml_bootstrapN: string val ml_roots: string list val bootstrap_thys: string list val is_base_name: string -> bool val import_name: string -> string val args: header parser val read_tokens: Position.T -> Token.T list -> header val read: Position.T -> string -> header end; structure Thy_Header: THY_HEADER = struct (** keyword declarations **) (* header *) type keywords = ((string * Position.T) * Keyword.spec) list; type header = {name: string * Position.T, imports: (string * Position.T) list, keywords: keywords}; fun make name imports keywords : header = {name = name, imports = imports, keywords = keywords}; (* bootstrap keywords *) val chapterN = "chapter"; val sectionN = "section"; val subsectionN = "subsection"; val subsubsectionN = "subsubsection"; val paragraphN = "paragraph"; val subparagraphN = "subparagraph"; val textN = "text"; val txtN = "txt"; val text_rawN = "text_raw"; val theoryN = "theory"; val importsN = "imports"; val keywordsN = "keywords"; val abbrevsN = "abbrevs"; val beginN = "begin"; val bootstrap_keywords = Keyword.empty_keywords |> Keyword.add_keywords [(("%", \<^here>), Keyword.no_spec), (("(", \<^here>), Keyword.no_spec), ((")", \<^here>), Keyword.no_spec), ((",", \<^here>), Keyword.no_spec), (("::", \<^here>), Keyword.no_spec), (("=", \<^here>), Keyword.no_spec), (("and", \<^here>), Keyword.no_spec), ((beginN, \<^here>), Keyword.quasi_command_spec), ((importsN, \<^here>), Keyword.quasi_command_spec), ((keywordsN, \<^here>), Keyword.quasi_command_spec), ((abbrevsN, \<^here>), Keyword.quasi_command_spec), ((chapterN, \<^here>), Keyword.document_heading_spec), ((sectionN, \<^here>), Keyword.document_heading_spec), ((subsectionN, \<^here>), Keyword.document_heading_spec), ((subsubsectionN, \<^here>), Keyword.document_heading_spec), ((paragraphN, \<^here>), Keyword.document_heading_spec), ((subparagraphN, \<^here>), Keyword.document_heading_spec), ((textN, \<^here>), Keyword.document_body_spec), ((txtN, \<^here>), Keyword.document_body_spec), ((text_rawN, \<^here>), Keyword.command_spec (Keyword.document_raw, ["document"])), ((theoryN, \<^here>), Keyword.command_spec (Keyword.thy_begin, ["theory"])), (("ML", \<^here>), Keyword.command_spec (Keyword.thy_decl, ["ML"]))]; (* theory data *) structure Data = Theory_Data ( type T = Keyword.keywords; val empty = bootstrap_keywords; val merge = Keyword.merge_keywords; ); val add_keywords = Data.map o Keyword.add_keywords; val get_keywords = Data.get; val get_keywords' = get_keywords o Proof_Context.theory_of; (** concrete syntax **) (* names *) val ml_bootstrapN = "ML_Bootstrap"; val ml_roots = ["ML_Root0", "ML_Root"]; val bootstrap_thys = ["Bootstrap_Pure", "Bootstrap_ML_Bootstrap"]; fun is_base_name s = s <> "" andalso not (exists_string (member (op =) ["/", "\\", ":"]) s) fun import_name s = if String.isSuffix ".thy" s then error ("Malformed theory import: " ^ quote s) else Path.file_name (Path.explode s); (* header args *) local fun imports name = if name = Context.PureN then Scan.succeed [] else Parse.$$$ importsN |-- Parse.!!! 
(Scan.repeat1 Parse.theory_name); val load_command = Scan.optional (Parse.$$$ "(" |-- Parse.!!! (Parse.position Parse.name) --| Parse.$$$ ")") ("", Position.none); val keyword_spec = Parse.group (fn () => "outer syntax keyword specification") ((Parse.name -- load_command) -- Document_Source.old_tags) >> (fn ((a, b), c) => {kind = a, load_command = b, tags = c}); val keyword_decl = Scan.repeat1 Parse.string_position -- Scan.optional (Parse.$$$ "::" |-- Parse.!!! keyword_spec) Keyword.no_spec >> (fn (names, spec) => map (rpair spec) names); val abbrevs = Parse.and_list1 - (Scan.repeat1 Parse.text -- (Parse.$$$ "=" |-- Parse.!!! (Scan.repeat1 Parse.text)) + (Scan.repeat1 Parse.embedded -- (Parse.$$$ "=" |-- Parse.!!! (Scan.repeat1 Parse.embedded)) >> uncurry (map_product pair)) >> flat; val keyword_decls = Parse.and_list1 keyword_decl >> flat; in val args = Parse.theory_name :|-- (fn (name, pos) => imports name -- Scan.optional (Parse.$$$ keywordsN |-- Parse.!!! keyword_decls) [] --| (Scan.optional (Parse.$$$ abbrevsN |-- Parse.!!! abbrevs) [] -- Parse.$$$ beginN) >> (fn (imports, keywords) => make (name, pos) imports keywords)); end; (* read header *) val heading = (Parse.command_name chapterN || Parse.command_name sectionN || Parse.command_name subsectionN || Parse.command_name subsubsectionN || Parse.command_name paragraphN || Parse.command_name subparagraphN || Parse.command_name textN || Parse.command_name txtN || Parse.command_name text_rawN) -- (Document_Source.annotation |-- Parse.!!! Parse.document_source); val parse_header = (Scan.repeat heading -- Parse.command_name theoryN --| Document_Source.annotation) |-- Parse.!!! args; fun read_tokens pos toks = filter Token.is_proper toks |> Source.of_list |> Source.source Token.stopper (Scan.single (Scan.error (Parse.!!! parse_header))) |> Source.get_single |> (fn SOME (header, _) => header | NONE => error ("Unexpected end of input" ^ Position.here pos)); local fun read_header pos text = Symbol_Pos.explode (text, pos) |> Token.tokenize bootstrap_keywords {strict = false} |> read_tokens pos; val approx_length = 1024; in fun read pos text = if size text <= approx_length then read_header pos text else let val approx_text = String.substring (text, 0, approx_length) in if String.isSuffix "begin" approx_text then read_header pos text else (read_header pos approx_text handle ERROR _ => read_header pos text) end; end; end; diff --git a/src/Pure/Tools/rail.ML b/src/Pure/Tools/rail.ML --- a/src/Pure/Tools/rail.ML +++ b/src/Pure/Tools/rail.ML @@ -1,395 +1,395 @@ (* Title: Pure/Tools/rail.ML Author: Michael Kerscher, TU München Author: Makarius Railroad diagrams in LaTeX. 
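For orientation, a sketch of the notation processed here (illustrative
only): within document text, a rail antiquotation such as

  @{rail \ 'if' expr 'then' expr ('else' expr)? \}

is tokenized and parsed below, then printed as a LaTeX "railoutput"
environment built from the rail@... macros.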
*) signature RAIL = sig datatype rails = Cat of int * rail list and rail = Bar of rails list | Plus of rails * rails | Newline of int | Nonterminal of string | Terminal of bool * string | Antiquote of bool * Antiquote.antiq val read: Proof.context -> Input.source -> (string Antiquote.antiquote * rail) list val output_rules: Proof.context -> (string Antiquote.antiquote * rail) list -> Latex.text end; structure Rail: RAIL = struct (** lexical syntax **) (* singleton keywords *) val keywords = Symtab.make [ ("|", Markup.keyword3), ("*", Markup.keyword3), ("+", Markup.keyword3), ("?", Markup.keyword3), ("(", Markup.empty), (")", Markup.empty), ("\", Markup.keyword2), (";", Markup.keyword2), (":", Markup.keyword2), ("@", Markup.keyword1)]; (* datatype token *) datatype kind = Keyword | Ident | String | Space | Comment of Comment.kind | Antiq of Antiquote.antiq | EOF; datatype token = Token of Position.range * (kind * string); fun pos_of (Token ((pos, _), _)) = pos; fun end_pos_of (Token ((_, pos), _)) = pos; fun range_of (toks as tok :: _) = let val pos' = end_pos_of (List.last toks) in Position.range (pos_of tok, pos') end | range_of [] = Position.no_range; fun kind_of (Token (_, (k, _))) = k; fun content_of (Token (_, (_, x))) = x; fun is_proper (Token (_, (Space, _))) = false | is_proper (Token (_, (Comment _, _))) = false | is_proper _ = true; (* diagnostics *) val print_kind = fn Keyword => "rail keyword" | Ident => "identifier" | String => "single-quoted string" | Space => "white space" | Comment _ => "formal comment" | Antiq _ => "antiquotation" | EOF => "end-of-input"; fun print (Token ((pos, _), (k, x))) = (if k = EOF then print_kind k else print_kind k ^ " " ^ quote x) ^ Position.here pos; fun print_keyword x = print_kind Keyword ^ " " ^ quote x; fun reports_of_token (Token ((pos, _), (Keyword, x))) = map (pair pos) (the_list (Symtab.lookup keywords x) @ Completion.suppress_abbrevs x) | reports_of_token (Token ((pos, _), (String, _))) = [(pos, Markup.inner_string)] | reports_of_token (Token (_, (Antiq antiq, _))) = Antiquote.antiq_reports [Antiquote.Antiq antiq] | reports_of_token _ = []; (* stopper *) fun mk_eof pos = Token ((pos, Position.none), (EOF, "")); val eof = mk_eof Position.none; fun is_eof (Token (_, (EOF, _))) = true | is_eof _ = false; val stopper = Scan.stopper (fn [] => eof | toks => mk_eof (end_pos_of (List.last toks))) is_eof; (* tokenize *) local fun token k ss = [Token (Symbol_Pos.range ss, (k, Symbol_Pos.content ss))]; fun antiq_token antiq = [Token (#range antiq, (Antiq antiq, Symbol_Pos.content (#body antiq)))]; val scan_space = Scan.many1 (Symbol.is_blank o Symbol_Pos.symbol); val scan_keyword = Scan.one (Symtab.defined keywords o Symbol_Pos.symbol); val err_prefix = "Rail lexical error: "; val scan_token = scan_space >> token Space || Comment.scan_inner >> (fn (kind, ss) => token (Comment kind) ss) || Antiquote.scan_antiq >> antiq_token || scan_keyword >> (token Keyword o single) || Lexicon.scan_id >> token Ident || Symbol_Pos.scan_string_q err_prefix >> (fn (pos1, (ss, pos2)) => [Token (Position.range (pos1, pos2), (String, Symbol_Pos.content ss))]); val scan = Scan.repeats scan_token --| Symbol_Pos.!!! (fn () => err_prefix ^ "bad input") (Scan.ahead (Scan.one Symbol_Pos.is_eof)); in val tokenize = #1 o Scan.error (Scan.finite Symbol_Pos.stopper scan); end; (** parsing **) (* parser combinators *) fun !!! 
scan = let val prefix = "Rail syntax error"; fun get_pos [] = " (end-of-input)" | get_pos (tok :: _) = Position.here (pos_of tok); fun err (toks, NONE) = (fn () => prefix ^ get_pos toks) | err (toks, SOME msg) = (fn () => let val s = msg () in if String.isPrefix prefix s then s else prefix ^ get_pos toks ^ ": " ^ s end); in Scan.!! err scan end; fun $$$ x = Scan.one (fn tok => kind_of tok = Keyword andalso content_of tok = x) || Scan.fail_with (fn [] => (fn () => print_keyword x ^ " expected,\nbut end-of-input was found") | tok :: _ => (fn () => print_keyword x ^ " expected,\nbut " ^ print tok ^ " was found")); fun enum1 sep scan = scan ::: Scan.repeat ($$$ sep |-- !!! scan); fun enum sep scan = enum1 sep scan || Scan.succeed []; val ident = Scan.some (fn tok => if kind_of tok = Ident then SOME (content_of tok) else NONE); val string = Scan.some (fn tok => if kind_of tok = String then SOME (content_of tok) else NONE); val antiq = Scan.some (fn tok => (case kind_of tok of Antiq a => SOME a | _ => NONE)); fun RANGE scan = Scan.trace scan >> apsnd range_of; fun RANGE_APP scan = RANGE scan >> (fn (f, r) => f r); (* parse trees *) datatype trees = CAT of tree list * Position.range and tree = BAR of trees list * Position.range | STAR of (trees * trees) * Position.range | PLUS of (trees * trees) * Position.range | MAYBE of tree * Position.range | NEWLINE of Position.range | NONTERMINAL of string * Position.range | TERMINAL of (bool * string) * Position.range | ANTIQUOTE of (bool * Antiquote.antiq) * Position.range; fun reports_of_tree ctxt = if Context_Position.reports_enabled ctxt then let fun reports r = if r = Position.no_range then [] else [(Position.range_position r, Markup.expression "")]; fun trees (CAT (ts, r)) = reports r @ maps tree ts and tree (BAR (Ts, r)) = reports r @ maps trees Ts | tree (STAR ((T1, T2), r)) = reports r @ trees T1 @ trees T2 | tree (PLUS ((T1, T2), r)) = reports r @ trees T1 @ trees T2 | tree (MAYBE (t, r)) = reports r @ tree t | tree (NEWLINE r) = reports r | tree (NONTERMINAL (_, r)) = reports r | tree (TERMINAL (_, r)) = reports r | tree (ANTIQUOTE (_, r)) = reports r; in distinct (op =) o tree end else K []; local val at_mode = Scan.option ($$$ "@") >> (fn NONE => false | _ => true); fun body x = (RANGE (enum1 "|" body1) >> BAR) x and body0 x = (RANGE (enum "|" body1) >> BAR) x and body1 x = (RANGE_APP (body2 :|-- (fn a => $$$ "*" |-- !!! body4e >> (fn b => fn r => CAT ([STAR ((a, b), r)], r)) || $$$ "+" |-- !!! body4e >> (fn b => fn r => CAT ([PLUS ((a, b), r)], r)) || Scan.succeed (K a)))) x and body2 x = (RANGE (Scan.repeat1 body3) >> CAT) x and body3 x = (RANGE_APP (body4 :|-- (fn a => $$$ "?" >> K (curry MAYBE a) || Scan.succeed (K a)))) x and body4 x = ($$$ "(" |-- !!! (body0 --| $$$ ")") || RANGE_APP ($$$ "\" >> K NEWLINE || ident >> curry NONTERMINAL || at_mode -- string >> curry TERMINAL || at_mode -- antiq >> curry ANTIQUOTE)) x and body4e x = (RANGE (Scan.option body4) >> (fn (a, r) => CAT (the_list a, r))) x; val rule_name = ident >> Antiquote.Text || antiq >> Antiquote.Antiq; val rule = rule_name -- ($$$ ":" |-- !!! body) || body >> pair (Antiquote.Text ""); val rules = enum1 ";" (Scan.option rule) >> map_filter I; in fun parse_rules toks = #1 (Scan.error (Scan.finite stopper (rules --| !!! 
(Scan.ahead (Scan.one is_eof)))) toks); end; (** rail expressions **) (* datatype *) datatype rails = Cat of int * rail list and rail = Bar of rails list | Plus of rails * rails | Newline of int | Nonterminal of string | Terminal of bool * string | Antiquote of bool * Antiquote.antiq; fun is_newline (Newline _) = true | is_newline _ = false; (* prepare *) local fun cat rails = Cat (0, rails); val empty = cat []; fun is_empty (Cat (_, [])) = true | is_empty _ = false; fun bar [Cat (_, [rail])] = rail | bar cats = Bar cats; fun reverse_cat (Cat (y, rails)) = Cat (y, rev (map reverse rails)) and reverse (Bar cats) = Bar (map reverse_cat cats) | reverse (Plus (cat1, cat2)) = Plus (reverse_cat cat1, reverse_cat cat2) | reverse x = x; fun plus (cat1, cat2) = Plus (cat1, reverse_cat cat2); in fun prepare_trees (CAT (ts, _)) = Cat (0, map prepare_tree ts) and prepare_tree (BAR (Ts, _)) = bar (map prepare_trees Ts) | prepare_tree (STAR (Ts, _)) = let val (cat1, cat2) = apply2 prepare_trees Ts in if is_empty cat2 then plus (empty, cat1) else bar [empty, cat [plus (cat1, cat2)]] end | prepare_tree (PLUS (Ts, _)) = plus (apply2 prepare_trees Ts) | prepare_tree (MAYBE (t, _)) = bar [empty, cat [prepare_tree t]] | prepare_tree (NEWLINE _) = Newline 0 | prepare_tree (NONTERMINAL (a, _)) = Nonterminal a | prepare_tree (TERMINAL (a, _)) = Terminal a | prepare_tree (ANTIQUOTE (a, _)) = Antiquote a; end; (* read *) fun read ctxt source = let val _ = Context_Position.report ctxt (Input.pos_of source) Markup.language_rail; val toks = tokenize (Input.source_explode source); val _ = Context_Position.reports ctxt (maps reports_of_token toks); val rules = parse_rules (filter is_proper toks); val _ = Context_Position.reports ctxt (maps (reports_of_tree ctxt o #2) rules); in map (apsnd prepare_tree) rules end; (* latex output *) local fun vertical_range_cat (Cat (_, rails)) y = let val (rails', (_, y')) = fold_map (fn rail => fn (y0, y') => if is_newline rail then (Newline (y' + 1), (y' + 1, y' + 2)) else let val (rail', y0') = vertical_range rail y0; in (rail', (y0, Int.max (y0', y'))) end) rails (y, y + 1) in (Cat (y, rails'), y') end and vertical_range (Bar cats) y = let val (cats', y') = fold_map vertical_range_cat cats y in (Bar cats', Int.max (y + 1, y')) end | vertical_range (Plus (cat1, cat2)) y = let val ([cat1', cat2'], y') = fold_map vertical_range_cat [cat1, cat2] y; in (Plus (cat1', cat2'), Int.max (y + 1, y')) end | vertical_range (Newline _) y = (Newline (y + 2), y + 3) | vertical_range atom y = (atom, y + 1); in fun output_rules ctxt rules = let val output_antiq = Antiquote.Antiq #> Document_Antiquotation.evaluate Latex.symbols ctxt; fun output_text b s = Latex.string (Output.output s) |> b ? 
Latex.macro "isakeyword" |> Latex.macro "isa"; fun output_cat c (Cat (_, rails)) = outputs c rails and outputs c [rail] = output c rail | outputs _ rails = maps (output "") rails and output _ (Bar []) = [] | output c (Bar [cat]) = output_cat c cat | output _ (Bar (cat :: cats)) = Latex.string ("\\rail@bar\n") @ output_cat "" cat @ maps (fn Cat (y, rails) => Latex.string ("\\rail@nextbar{" ^ string_of_int y ^ "}\n") @ outputs "" rails) cats @ Latex.string "\\rail@endbar\n" | output c (Plus (cat, Cat (y, rails))) = Latex.string "\\rail@plus\n" @ output_cat c cat @ Latex.string ("\\rail@nextplus{" ^ string_of_int y ^ "}\n") @ outputs "c" rails @ Latex.string "\\rail@endplus\n" | output _ (Newline y) = Latex.string ("\\rail@cr{" ^ string_of_int y ^ "}\n") | output c (Nonterminal s) = Latex.string ("\\rail@" ^ c ^ "nont{") @ output_text false s @ Latex.string "}[]\n" | output c (Terminal (b, s)) = Latex.string ("\\rail@" ^ c ^ "term{") @ output_text b s @ Latex.string "}[]\n" | output c (Antiquote (b, a)) = Latex.string ("\\rail@" ^ c ^ (if b then "term{" else "nont{")) @ Latex.output (output_antiq a) @ Latex.string "}[]\n"; fun output_rule (name, rail) = let val (rail', y') = vertical_range rail 0; val out_name = (case name of Antiquote.Text "" => [] | Antiquote.Text s => output_text false s | Antiquote.Antiq a => output_antiq a); in Latex.string ("\\rail@begin{" ^ string_of_int y' ^ "}{") @ out_name @ Latex.string "}\n" @ output "" rail' @ Latex.string "\\rail@end\n" end; - in Latex.environment_text "railoutput" (maps output_rule rules) end; + in Latex.environment "railoutput" (maps output_rule rules) end; val _ = Theory.setup - (Document_Output.antiquotation_raw_embedded \<^binding>\rail\ (Scan.lift Args.text_input) + (Document_Output.antiquotation_raw_embedded \<^binding>\rail\ (Scan.lift Parse.embedded_input) (fn ctxt => output_rules ctxt o read ctxt)); end; end; diff --git a/src/Pure/Tools/update_cartouches.scala b/src/Pure/Tools/update_cartouches.scala --- a/src/Pure/Tools/update_cartouches.scala +++ b/src/Pure/Tools/update_cartouches.scala @@ -1,114 +1,106 @@ /* Title: Pure/Tools/update_cartouches.scala Author: Makarius Update theory syntax to use cartouches etc. 
*/ package isabelle import scala.util.matching.Regex object Update_Cartouches { /* update cartouches */ - private val Verbatim_Body = """(?s)[ \t]*(.*?)[ \t]*""".r - val Text_Antiq: Regex = """(?s)@\{\s*text\b\s*(.+)\}""".r def update_text(content: String): String = { (try { Some(Antiquote.read(content)) } catch { case ERROR(_) => None }) match { case Some(ants) => val ants1: List[Antiquote.Antiquote] = ants.map(ant => ant match { case Antiquote.Antiq(Text_Antiq(body)) => Token.explode(Keyword.Keywords.empty, body).filterNot(_.is_space) match { case List(tok) => Antiquote.Control(Symbol.cartouche(tok.content)) case _ => ant } case _ => ant }) if (ants == ants1) content else ants1.map(_.source).mkString case None => content } } def update_cartouches(replace_text: Boolean, path: Path): Unit = { val text0 = File.read(path) // outer syntax cartouches val text1 = (for (tok <- Token.explode(Keyword.Keywords.empty, text0).iterator) yield { if (tok.kind == Token.Kind.ALT_STRING) Symbol.cartouche(tok.content) - else if (tok.kind == Token.Kind.VERBATIM) { - tok.content match { - case Verbatim_Body(s) => Symbol.cartouche(s) - case s => tok.source - } - } else tok.source } ).mkString // cartouches within presumed text tokens val text2 = if (replace_text) { (for (tok <- Token.explode(Keyword.Keywords.empty, text1).iterator) yield { if (tok.kind == Token.Kind.STRING || tok.kind == Token.Kind.CARTOUCHE) { val content = tok.content val content1 = update_text(content) if (content == content1) tok.source else if (tok.kind == Token.Kind.STRING) Outer_Syntax.quote_string(content1) else Symbol.cartouche(content1) } else tok.source }).mkString } else text1 if (text0 != text2) { Output.writeln("changing " + path) File.write_backup2(path, text2) } } /* Isabelle tool wrapper */ val isabelle_tool = Isabelle_Tool("update_cartouches", "update theory syntax to use cartouches", Scala_Project.here, args => { var replace_text = false val getopts = Getopts(""" Usage: isabelle update_cartouches [FILES|DIRS...] Options are: -t replace @{text} antiquotations within text tokens Recursively find .thy or ROOT files and update theory syntax to use - cartouches instead of old-style {* verbatim *} or `alt_string` tokens. + cartouches instead of `alt_string` tokens. Old versions of files are preserved by appending "~~". 
""", "t" -> (_ => replace_text = true)) val specs = getopts(args) if (specs.isEmpty) getopts.usage() for { spec <- specs file <- File.find_files(Path.explode(spec).file, file => file.getName.endsWith(".thy") || file.getName == "ROOT") } update_cartouches(replace_text, File.path(file)) }) } diff --git a/src/Pure/Tools/update_comments.scala b/src/Pure/Tools/update_comments.scala --- a/src/Pure/Tools/update_comments.scala +++ b/src/Pure/Tools/update_comments.scala @@ -1,69 +1,69 @@ /* Title: Pure/Tools/update_comments.scala Author: Makarius Update formal comments in outer syntax: \ \...\ */ package isabelle import scala.annotation.tailrec object Update_Comments { def update_comments(path: Path): Unit = { def make_comment(tok: Token): String = Symbol.comment + Symbol.space + Symbol.cartouche(Symbol.trim_blanks(tok.content)) @tailrec def update(toks: List[Token], result: List[String]): String = { toks match { case tok :: rest if tok.source == "--" || tok.source == Symbol.comment => rest.dropWhile(_.is_space) match { - case tok1 :: rest1 if tok1.is_text => + case tok1 :: rest1 if tok1.is_embedded => update(rest1, make_comment(tok1) :: result) case _ => update(rest, tok.source :: result) } case tok :: rest if tok.is_formal_comment && tok.source.startsWith(Symbol.comment) => update(rest, make_comment(tok) :: result) case tok :: rest => update(rest, tok.source :: result) case Nil => result.reverse.mkString } } val text0 = File.read(path) val text1 = update(Token.explode(Keyword.Keywords.empty, text0), Nil) if (text0 != text1) { Output.writeln("changing " + path) File.write_backup2(path, text1) } } /* Isabelle tool wrapper */ val isabelle_tool = Isabelle_Tool("update_comments", "update formal comments in outer syntax", Scala_Project.here, args => { val getopts = Getopts(""" Usage: isabelle update_comments [FILES|DIRS...] Recursively find .thy files and update formal comments in outer syntax. Old versions of files are preserved by appending "~~". """) val specs = getopts(args) if (specs.isEmpty) getopts.usage() for { spec <- specs file <- File.find_files(Path.explode(spec).file, file => file.getName.endsWith(".thy")) } update_comments(File.path(file)) }) } diff --git a/src/Pure/pure_syn.ML b/src/Pure/pure_syn.ML --- a/src/Pure/pure_syn.ML +++ b/src/Pure/pure_syn.ML @@ -1,58 +1,58 @@ (* Title: Pure/pure_syn.ML Author: Makarius Outer syntax for bootstrapping: commands that are accessible outside a regular theory context. 
*) signature PURE_SYN = sig val bootstrap_thy: theory end; structure Pure_Syn: PURE_SYN = struct fun document_heading (name, pos) = Outer_Syntax.command (name, pos) (name ^ " heading") (Parse.opt_target -- Parse.document_source --| Scan.option (Parse.$$$ ";") >> Document_Output.document_output {markdown = false, markup = fn body => [XML.Elem (Markup.latex_heading name, body)]}); fun document_body ((name, pos), description) = Outer_Syntax.command (name, pos) description (Parse.opt_target -- Parse.document_source >> Document_Output.document_output {markdown = true, markup = fn body => [XML.Elem (Markup.latex_body name, body)]}); val _ = List.app document_heading [("chapter", \<^here>), ("section", \<^here>), ("subsection", \<^here>), ("subsubsection", \<^here>), ("paragraph", \<^here>), ("subparagraph", \<^here>)]; val _ = List.app document_body [(("text", \<^here>), "formal comment (primary style)"), (("txt", \<^here>), "formal comment (secondary style)")]; val _ = Outer_Syntax.command ("text_raw", \<^here>) "LaTeX text (without surrounding environment)" (Parse.opt_target -- Parse.document_source >> Document_Output.document_output - {markdown = true, markup = Latex.enclose_text "%\n" "\n"}); + {markdown = true, markup = XML.enclose "%\n" "\n"}); val _ = Outer_Syntax.command ("theory", \<^here>) "begin theory" (Thy_Header.args >> (fn _ => Toplevel.init_theory (fn () => error "Missing theory initialization"))); val bootstrap_thy = Context.the_global_context (); val _ = Theory.setup (Config.put_global Outer_Syntax.bootstrap false); end; diff --git a/src/Tools/Code/code_target.ML b/src/Tools/Code/code_target.ML --- a/src/Tools/Code/code_target.ML +++ b/src/Tools/Code/code_target.ML @@ -1,796 +1,796 @@ (* Title: Tools/Code/code_target.ML Author: Florian Haftmann, TU Muenchen Generic infrastructure for target language data. 
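For orientation, a sketch of a user-level invocation of this
infrastructure (illustrative only):

  export_code fib in SML module_name Fib file_prefix fib

This selects a target language, serializes the intermediate program via
the registered serializer, and exports the result either logically, as
theory exports, or physically to a file-system location.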
*) signature CODE_TARGET = sig val cert_tyco: Proof.context -> string -> string val read_tyco: Proof.context -> string -> string datatype pretty_modules = Singleton of string * Pretty.T | Hierarchy of (string list * Pretty.T) list; val next_export: theory -> string * theory val export_code_for: ({physical: bool} * (Path.T * Position.T)) option -> string -> string -> int option -> Token.T list -> Code_Thingol.program -> bool -> Code_Symbol.T list -> local_theory -> local_theory val produce_code_for: Proof.context -> string -> string -> int option -> Token.T list -> Code_Thingol.program -> bool -> Code_Symbol.T list -> (string list * string) list * string option list val present_code_for: Proof.context -> string -> string -> int option -> Token.T list -> Code_Thingol.program -> Code_Symbol.T list * Code_Symbol.T list -> string val check_code_for: string -> bool -> Token.T list -> Code_Thingol.program -> bool -> Code_Symbol.T list -> local_theory -> local_theory val export_code: bool -> string list -> (((string * string) * ({physical: bool} * (Path.T * Position.T)) option) * Token.T list) list -> local_theory -> local_theory val export_code_cmd: bool -> string list -> (((string * string) * ({physical: bool} * Input.source) option) * Token.T list) list -> local_theory -> local_theory val produce_code: Proof.context -> bool -> string list -> string -> string -> int option -> Token.T list -> (string list * string) list * string option list val present_code: Proof.context -> string list -> Code_Symbol.T list -> string -> string -> int option -> Token.T list -> string val check_code: bool -> string list -> ((string * bool) * Token.T list) list -> local_theory -> local_theory val codeN: string val generatedN: string val code_path: Path.T -> Path.T val code_export_message: theory -> unit val export: Path.binding -> string -> theory -> theory val compilation_text: Proof.context -> string -> Code_Thingol.program -> Code_Symbol.T list -> bool -> ((string * class list) list * Code_Thingol.itype) * Code_Thingol.iterm -> (string list * string) list * string val compilation_text': Proof.context -> string -> string option -> Code_Thingol.program -> Code_Symbol.T list -> bool -> ((string * class list) list * Code_Thingol.itype) * Code_Thingol.iterm -> ((string list * string) list * string) * (Code_Symbol.T -> string option) type serializer type literals = Code_Printer.literals type language type ancestry val assert_target: theory -> string -> string val add_language: string * language -> theory -> theory val add_derived_target: string * ancestry -> theory -> theory val the_literals: Proof.context -> string -> literals val parse_args: 'a parser -> Token.T list -> 'a val default_code_width: int Config.T type ('a, 'b, 'c, 'd, 'e, 'f) symbol_attr_decl val set_identifiers: (string, string, string, string, string, string) symbol_attr_decl -> theory -> theory val set_printings: (Code_Printer.raw_const_syntax, Code_Printer.tyco_syntax, string, unit, unit, string * Code_Symbol.T list) symbol_attr_decl -> theory -> theory val add_reserved: string -> string -> theory -> theory end; structure Code_Target : CODE_TARGET = struct open Basic_Code_Symbol; open Basic_Code_Thingol; type literals = Code_Printer.literals; type ('a, 'b, 'c, 'd, 'e, 'f) symbol_attr_decl = (string * (string * 'a option) list, string * (string * 'b option) list, class * (string * 'c option) list, (class * class) * (string * 'd option) list, (class * string) * (string * 'e option) list, string * (string * 'f option) list) Code_Symbol.attr; type 
tyco_syntax = Code_Printer.tyco_syntax; type raw_const_syntax = Code_Printer.raw_const_syntax; (** checking and parsing of symbols **) fun cert_const ctxt const = let val _ = if Sign.declared_const (Proof_Context.theory_of ctxt) const then () else error ("No such constant: " ^ quote const); in const end; fun read_const ctxt = Code.read_const (Proof_Context.theory_of ctxt); fun cert_tyco ctxt tyco = let val _ = if Sign.declared_tyname (Proof_Context.theory_of ctxt) tyco then () else error ("No such type constructor: " ^ quote tyco); in tyco end; fun read_tyco ctxt = #1 o dest_Type o Proof_Context.read_type_name {proper = true, strict = true} ctxt; fun cert_class ctxt class = let val _ = Axclass.get_info (Proof_Context.theory_of ctxt) class; in class end; val parse_classrel_ident = Parse.class --| \<^keyword>\<\ -- Parse.class; fun cert_inst ctxt (class, tyco) = (cert_class ctxt class, cert_tyco ctxt tyco); fun read_inst ctxt (raw_tyco, raw_class) = (read_tyco ctxt raw_tyco, Proof_Context.read_class ctxt raw_class); val parse_inst_ident = Parse.name --| \<^keyword>\::\ -- Parse.class; fun cert_syms ctxt = Code_Symbol.map_attr (cert_const ctxt) (cert_tyco ctxt) (cert_class ctxt) (apply2 (cert_class ctxt)) (cert_inst ctxt) I; fun read_syms ctxt = Code_Symbol.map_attr (read_const ctxt) (read_tyco ctxt) (Proof_Context.read_class ctxt) (apply2 (Proof_Context.read_class ctxt)) (read_inst ctxt) I; fun cert_syms' ctxt = Code_Symbol.map_attr (apfst (cert_const ctxt)) (apfst (cert_tyco ctxt)) (apfst (cert_class ctxt)) ((apfst o apply2) (cert_class ctxt)) (apfst (cert_inst ctxt)) I; fun read_syms' ctxt = Code_Symbol.map_attr (apfst (read_const ctxt)) (apfst (read_tyco ctxt)) (apfst (Proof_Context.read_class ctxt)) ((apfst o apply2) (Proof_Context.read_class ctxt)) (apfst (read_inst ctxt)) I; fun check_name is_module s = let val _ = if s = "" then error "Bad empty code name" else (); val xs = Long_Name.explode s; val xs' = if is_module then map (Name.desymbolize NONE) xs else if length xs < 2 then error ("Bad code name without module component: " ^ quote s) else let val (ys, y) = split_last xs; val ys' = map (Name.desymbolize NONE) ys; val y' = Name.desymbolize NONE y; in ys' @ [y'] end; in if xs' = xs then if is_module then (xs, "") else split_last xs else error ("Invalid code name: " ^ quote s ^ "\n" ^ "better try " ^ quote (Long_Name.implode xs')) end; (** theory data **) datatype pretty_modules = Singleton of string * Pretty.T | Hierarchy of (string list * Pretty.T) list; type serializer = Token.T list -> Proof.context -> { reserved_syms: string list, identifiers: Code_Printer.identifiers, includes: (string * Pretty.T) list, class_syntax: string -> string option, tyco_syntax: string -> Code_Printer.tyco_syntax option, const_syntax: string -> Code_Printer.const_syntax option, module_name: string } -> Code_Thingol.program -> Code_Symbol.T list -> pretty_modules * (Code_Symbol.T -> string option); type language = {serializer: serializer, literals: literals, check: {env_var: string, make_destination: Path.T -> Path.T, make_command: string -> string}, evaluation_args: Token.T list}; type ancestry = (string * (Code_Thingol.program -> Code_Thingol.program)) list; val merge_ancestry : ancestry * ancestry -> ancestry = AList.join (op =) (K snd); type target = {serial: serial, language: language, ancestry: ancestry}; structure Targets = Theory_Data ( type T = (target * Code_Printer.data) Symtab.table * int; val empty = (Symtab.empty, 0); fun merge ((targets1, index1), (targets2, index2)) : T = let val 
targets' = Symtab.join (fn target_name => fn ((target1, data1), (target2, data2)) => if #serial target1 = #serial target2 then ({serial = #serial target1, language = #language target1, ancestry = merge_ancestry (#ancestry target1, #ancestry target2)}, Code_Printer.merge_data (data1, data2)) else error ("Incompatible targets: " ^ quote target_name)) (targets1, targets2) val index' = Int.max (index1, index2); in (targets', index') end; ); val exists_target = Symtab.defined o #1 o Targets.get; val lookup_target_data = Symtab.lookup o #1 o Targets.get; fun assert_target thy target_name = if exists_target thy target_name then target_name else error ("Unknown code target language: " ^ quote target_name); fun reset_index thy = if #2 (Targets.get thy) = 0 then NONE else SOME ((Targets.map o apsnd) (K 0) thy); val _ = Theory.setup (Theory.at_begin reset_index); fun next_export thy = let val thy' = (Targets.map o apsnd) (fn i => i + 1) thy; val i = #2 (Targets.get thy'); in ("export" ^ string_of_int i, thy') end; fun fold1 f xs = fold f (tl xs) (hd xs); fun join_ancestry thy target_name = let val _ = assert_target thy target_name; val the_target_data = the o lookup_target_data thy; val (target, this_data) = the_target_data target_name; val ancestry = #ancestry target; val modifies = rev (map snd ancestry); val modify = fold (curry (op o)) modifies I; val datas = rev (map (snd o the_target_data o fst) ancestry) @ [this_data]; val data = fold1 (fn data' => fn data => Code_Printer.merge_data (data, data')) datas; in (modify, (target, data)) end; fun allocate_target target_name target thy = let val _ = if exists_target thy target_name then error ("Attempt to overwrite existing target " ^ quote target_name) else (); in thy |> (Targets.map o apfst o Symtab.update) (target_name, (target, Code_Printer.empty_data)) end; fun add_language (target_name, language) = allocate_target target_name {serial = serial (), language = language, ancestry = []}; fun add_derived_target (target_name, initial_ancestry) thy = let val _ = if null initial_ancestry then error "Must derive from existing target(s)" else (); fun the_target_data target_name' = case lookup_target_data thy target_name' of NONE => error ("Unknown code target language: " ^ quote target_name') | SOME target_data' => target_data'; val targets = rev (map (fst o the_target_data o fst) initial_ancestry); val supremum = fold1 (fn target' => fn target => if #serial target = #serial target' then target else error "Incompatible targets") targets; val ancestries = map #ancestry targets @ [initial_ancestry]; val ancestry = fold1 (fn ancestry' => fn ancestry => merge_ancestry (ancestry, ancestry')) ancestries; in allocate_target target_name {serial = #serial supremum, language = #language supremum, ancestry = ancestry} thy end; fun map_data target_name f thy = let val _ = assert_target thy target_name; in thy |> (Targets.map o apfst o Symtab.map_entry target_name o apsnd o Code_Printer.map_data) f end; fun map_reserved target_name = map_data target_name o @{apply 3(1)}; fun map_identifiers target_name = map_data target_name o @{apply 3(2)}; fun map_printings target_name = map_data target_name o @{apply 3(3)}; (** serializers **) val codeN = "code"; val generatedN = "Generated_Code"; val code_path = Path.append (Path.basic codeN); fun code_export_message thy = writeln (Export.message thy (Path.basic codeN)); fun export binding content thy = let val thy' = thy |> Generated_Files.add_files (binding, content); val _ = Export.export thy' binding [XML.Text content]; in 
thy' end; local fun export_logical (file_prefix, file_pos) format pretty_modules = let fun binding path = Path.binding (path, file_pos); val prefix = code_path file_prefix; in (case pretty_modules of Singleton (ext, p) => export (binding (Path.ext ext prefix)) (format p) | Hierarchy modules => fold (fn (names, p) => export (binding (prefix + Path.make names)) (format p)) modules) #> tap code_export_message end; fun export_physical root format pretty_modules = (case pretty_modules of Singleton (_, p) => File.write root (format p) | Hierarchy code_modules => (Isabelle_System.make_directory root; List.app (fn (names, p) => File.write (Path.appends (root :: map Path.basic names)) (format p)) code_modules)); in fun export_result some_file format (pretty_code, _) thy = (case some_file of NONE => let val (file_prefix, thy') = next_export thy; in export_logical (Path.basic file_prefix, Position.none) format pretty_code thy' end | SOME ({physical = false}, file_prefix) => export_logical file_prefix format pretty_code thy | SOME ({physical = true}, (file, _)) => let val root = File.full_path (Resources.master_directory thy) file; val _ = File.check_dir (Path.dir root); val _ = export_physical root format pretty_code; in thy end); fun produce_result syms width pretty_modules = let val format = Code_Printer.format [] width in (case pretty_modules of (Singleton (_, p), deresolve) => ([([], format p)], map deresolve syms) | (Hierarchy code_modules, deresolve) => ((map o apsnd) format code_modules, map deresolve syms)) end; fun present_result selects width (pretty_modules, _) = let val format = Code_Printer.format selects width in (case pretty_modules of Singleton (_, p) => format p | Hierarchy code_modules => space_implode "\n\n" (map (format o #2) code_modules)) end; end; (** serializer usage **) (* technical aside: pretty printing width *) val default_code_width = Attrib.setup_config_int \<^binding>\default_code_width\ (K 80); fun default_width ctxt = Config.get ctxt default_code_width; val the_width = the_default o default_width; (* montage *) fun the_language ctxt = #language o fst o the o lookup_target_data (Proof_Context.theory_of ctxt); fun the_literals ctxt = #literals o the_language ctxt; fun the_evaluation_args ctxt = #evaluation_args o the_language ctxt; local fun activate_target ctxt target_name = let val thy = Proof_Context.theory_of ctxt; val (modify, (target, data)) = join_ancestry thy target_name; val serializer = (#serializer o #language) target; in { serializer = serializer, data = data, modify = modify } end; fun project_program_for_syms ctxt syms_hidden syms1 program1 = let val syms2 = subtract (op =) syms_hidden syms1; val program2 = Code_Symbol.Graph.restrict (not o member (op =) syms_hidden) program1; val unimplemented = Code_Thingol.unimplemented program2; val _ = if null unimplemented then () else error ("No code equations for " ^ commas (map (Proof_Context.markup_const ctxt) unimplemented)); val syms3 = Code_Symbol.Graph.all_succs program2 syms2; val program3 = Code_Symbol.Graph.restrict (member (op =) syms3) program2; in program3 end; fun project_includes_for_syms syms includes = let fun select_include (name, (content, cs)) = if null cs orelse exists (member (op =) syms) cs then SOME (name, content) else NONE; in map_filter select_include includes end; fun prepare_serializer ctxt target_name module_name args proto_program syms = let val { serializer, data, modify } = activate_target ctxt target_name; val printings = Code_Printer.the_printings data; val _ = if module_name = "" 
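(* The guard below only validates non-empty module names; the empty string
   means "no explicit module name given".  Behaviour of check_name, derived
   from its definition above (hypothetical inputs):
     check_name true "My_Lib.Example"   (*yields (["My_Lib", "Example"], "")*)
     check_name false "My_Lib.exmpl"    (*yields (["My_Lib"], "exmpl")*)
     check_name false "exmpl"           (*error: no module component*)
*)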
then () else (check_name true module_name; ()) val hidden_syms = Code_Symbol.symbols_of printings; val prepared_program = project_program_for_syms ctxt hidden_syms syms proto_program; val prepared_syms = subtract (op =) hidden_syms syms; val all_syms = Code_Symbol.Graph.all_succs proto_program syms; val includes = project_includes_for_syms all_syms (Code_Symbol.dest_module_data printings); val prepared_serializer = serializer args ctxt { reserved_syms = Code_Printer.the_reserved data, identifiers = Code_Printer.the_identifiers data, includes = includes, const_syntax = Code_Symbol.lookup_constant_data printings, tyco_syntax = Code_Symbol.lookup_type_constructor_data printings, class_syntax = Code_Symbol.lookup_type_class_data printings, module_name = module_name }; in (prepared_serializer o modify, (prepared_program, prepared_syms)) end; fun invoke_serializer ctxt target_name module_name args program all_public syms = let val (prepared_serializer, (prepared_program, prepared_syms)) = prepare_serializer ctxt target_name module_name args program syms; val exports = if all_public then [] else prepared_syms; in Code_Preproc.timed_exec "serializing" (fn () => prepared_serializer prepared_program exports) ctxt end; fun assert_module_name "" = error "Empty module name not allowed here" | assert_module_name module_name = module_name; in fun export_code_for some_file target_name module_name some_width args program all_public cs lthy = let val format = Code_Printer.format [] (the_width lthy some_width); val res = invoke_serializer lthy target_name module_name args program all_public cs; in Local_Theory.background_theory (export_result some_file format res) lthy end; fun produce_code_for ctxt target_name module_name some_width args = let val serializer = invoke_serializer ctxt target_name (assert_module_name module_name) args; in fn program => fn all_public => fn syms => produce_result syms (the_width ctxt some_width) (serializer program all_public syms) end; fun present_code_for ctxt target_name module_name some_width args = let val serializer = invoke_serializer ctxt target_name (assert_module_name module_name) args; in fn program => fn (syms, selects) => present_result selects (the_width ctxt some_width) (serializer program false syms) end; fun check_code_for target_name strict args program all_public syms lthy = let val { env_var, make_destination, make_command } = #check (the_language lthy target_name); val format = Code_Printer.format [] 80; fun ext_check p = let val destination = make_destination p; val lthy' = lthy |> Local_Theory.background_theory (export_result (SOME ({physical = true}, (destination, Position.none))) format (invoke_serializer lthy target_name generatedN args program all_public syms)); val cmd = make_command generatedN; val context_cmd = "cd " ^ File.bash_path p ^ " && " ^ cmd ^ " 2>&1"; in if Isabelle_System.bash context_cmd <> 0 then error ("Code check failed for " ^ target_name ^ ": " ^ cmd) else lthy' end; in if not (env_var = "") andalso getenv env_var = "" then if strict then error (env_var ^ " not set; cannot check code for " ^ target_name) else (warning (env_var ^ " not set; skipped checking code for " ^ target_name); lthy) else Isabelle_System.with_tmp_dir "Code_Test" ext_check end; fun dynamic_compilation_text prepared_serializer width prepared_program syms all_public ((vs, ty), t) = let val _ = if Code_Thingol.contains_dict_var t then error "Term to be evaluated contains free dictionaries" else (); val v' = singleton (Name.variant_list (map fst vs)) "a"; val vs' = 
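(* Sketch of the construction below (hedged): for a checked term t : ty with
   type context vs, the distinguished value symbol is bound to the
   one-argument function
     fn dummy => t
   under a fresh type variable, so its type becomes 'a -> ty; the extra
   abstraction keeps the compiled value from being evaluated prematurely in
   strict target languages. *)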
(v', []) :: vs; val ty' = ITyVar v' `-> ty; val program = prepared_program |> Code_Symbol.Graph.new_node (Code_Symbol.value, Code_Thingol.Fun (((vs', ty'), [(([IVar (SOME "dummy")], t), (NONE, true))]), NONE)) |> fold (curry (perhaps o try o Code_Symbol.Graph.add_edge) Code_Symbol.value) syms; val (pretty_code, deresolve) = prepared_serializer program (if all_public then [] else [Code_Symbol.value]); val (compilation_code, [SOME value_name]) = produce_result [Code_Symbol.value] width (pretty_code, deresolve); in ((compilation_code, value_name), deresolve) end; fun compilation_text' ctxt target_name some_module_name program syms = let val width = default_width ctxt; val evaluation_args = the_evaluation_args ctxt target_name; val (prepared_serializer, (prepared_program, _)) = prepare_serializer ctxt target_name (the_default generatedN some_module_name) evaluation_args program syms; in Code_Preproc.timed_exec "serializing" (fn () => dynamic_compilation_text prepared_serializer width prepared_program syms) ctxt end; fun compilation_text ctxt target_name program syms = fst oo compilation_text' ctxt target_name NONE program syms end; (* local *) (* code generation *) fun prep_destination (location, source) = let val s = Input.string_of source val pos = Input.pos_of source val delimited = Input.is_delimited source in if location = {physical = false} then (location, Path.explode_pos (s, pos)) else let val _ = if s = "" then error ("Bad empty " ^ Markup.markup Markup.keyword2 "file" ^ " argument") else (); val _ = legacy_feature (Markup.markup Markup.keyword1 "export_code" ^ " with " ^ Markup.markup Markup.keyword2 "file" ^ " argument" ^ Position.here pos); val _ = Position.report pos (Markup.language_path delimited); val path = #1 (Path.explode_pos (s, pos)); val _ = Position.report pos (Markup.path (Path.implode_symbolic path)); in (location, (path, pos)) end end; fun export_code all_public cs seris lthy = let val program = Code_Thingol.consts_program lthy cs; in (seris, lthy) |-> fold (fn (((target_name, module_name), some_file), args) => export_code_for some_file target_name module_name NONE args program all_public (map Constant cs)) end; fun export_code_cmd all_public raw_cs seris lthy = let val cs = Code_Thingol.read_const_exprs lthy raw_cs; in export_code all_public cs ((map o apfst o apsnd o Option.map) prep_destination seris) lthy end; fun produce_code ctxt all_public cs target_name some_width some_module_name args = let val program = Code_Thingol.consts_program ctxt cs; in produce_code_for ctxt target_name some_width some_module_name args program all_public (map Constant cs) end; fun present_code ctxt cs syms target_name some_width some_module_name args = let val program = Code_Thingol.consts_program ctxt cs; in present_code_for ctxt target_name some_width some_module_name args program (map Constant cs, syms) end; fun check_code all_public cs seris lthy = let val program = Code_Thingol.consts_program lthy cs; in (seris, lthy) |-> fold (fn ((target_name, strict), args) => check_code_for target_name strict args program all_public (map Constant cs)) end; fun check_code_cmd all_public raw_cs seris lthy = check_code all_public (Code_Thingol.read_const_exprs lthy raw_cs) seris lthy; (** serializer configuration **) (* reserved symbol names *) fun add_reserved target_name sym thy = let val (_, (_, data)) = join_ancestry thy target_name; val _ = if member (op =) (Code_Printer.the_reserved data) sym then error ("Reserved symbol " ^ quote sym ^ " already declared") else (); in thy |>
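(* Hypothetical usage of add_reserved: declaring
     add_reserved "Haskell" "error"
   keeps the serializer from using "error" for any generated identifier in
   the Haskell target; the Isar command "code_reserved Haskell error" (set up
   below) has the same effect. *)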
map_reserved target_name (insert (op =) sym) end; (* checking of syntax *) fun check_const_syntax ctxt target_name c syn = if Code_Printer.requires_args syn > Code.args_number (Proof_Context.theory_of ctxt) c then error ("Too many arguments in syntax for constant " ^ quote c) else Code_Printer.prep_const_syntax (Proof_Context.theory_of ctxt) (the_literals ctxt target_name) c syn; fun check_tyco_syntax ctxt target_name tyco syn = if fst syn <> Sign.arity_number (Proof_Context.theory_of ctxt) tyco then error ("Number of arguments mismatch in syntax for type constructor " ^ quote tyco) else syn; (* custom symbol names *) fun arrange_name_decls x = let fun arrange is_module (sym, target_names) = map (fn (target, some_name) => (target, (sym, Option.map (check_name is_module) some_name))) target_names; in Code_Symbol.maps_attr' (arrange false) (arrange false) (arrange false) (arrange false) (arrange false) (arrange true) x end; fun cert_name_decls ctxt = cert_syms' ctxt #> arrange_name_decls; fun read_name_decls ctxt = read_syms' ctxt #> arrange_name_decls; fun set_identifier (target_name, sym_name) = map_identifiers target_name (Code_Symbol.set_data sym_name); fun gen_set_identifiers prep_name_decl raw_name_decls thy = fold set_identifier (prep_name_decl (Proof_Context.init_global thy) raw_name_decls) thy; val set_identifiers = gen_set_identifiers cert_name_decls; val set_identifiers_cmd = gen_set_identifiers read_name_decls; (* custom printings *) fun arrange_printings prep_syms ctxt = let fun arrange check (sym, target_syns) = map (fn (target_name, some_syn) => (target_name, (sym, Option.map (check ctxt target_name sym) some_syn))) target_syns; in Code_Symbol.maps_attr' (arrange check_const_syntax) (arrange check_tyco_syntax) (arrange ((K o K o K) I)) (arrange ((K o K o K) I)) (arrange ((K o K o K) I)) (arrange (fn ctxt => fn _ => fn _ => fn (raw_content, raw_syms) => (Pretty.blk (0, Pretty.fbreaks (map Code_Printer.str (split_lines raw_content))), map (prep_syms ctxt) raw_syms))) end; fun cert_printings ctxt = cert_syms' ctxt #> arrange_printings cert_syms ctxt; fun read_printings ctxt = read_syms' ctxt #> arrange_printings read_syms ctxt; fun set_printing (target_name, sym_syn) = map_printings target_name (Code_Symbol.set_data sym_syn); fun gen_set_printings prep_print_decl raw_print_decls thy = fold set_printing (prep_print_decl (Proof_Context.init_global thy) raw_print_decls) thy; val set_printings = gen_set_printings cert_printings; val set_printings_cmd = gen_set_printings read_printings; (* concrete syntax *) fun parse_args f args = case Scan.read Token.stopper f args of SOME x => x | NONE => error "Bad serializer arguments"; (** Isar setup **) val (constantK, type_constructorK, type_classK, class_relationK, class_instanceK, code_moduleK) = (\<^keyword>\constant\, \<^keyword>\type_constructor\, \<^keyword>\type_class\, \<^keyword>\class_relation\, \<^keyword>\class_instance\, \<^keyword>\code_module\); local val parse_constants = constantK |-- Scan.repeat1 Parse.term >> map Constant; val parse_type_constructors = type_constructorK |-- Scan.repeat1 Parse.type_const >> map Type_Constructor; val parse_classes = type_classK |-- Scan.repeat1 Parse.class >> map Type_Class; val parse_class_relations = class_relationK |-- Scan.repeat1 parse_classrel_ident >> map Class_Relation; val parse_instances = class_instanceK |-- Scan.repeat1 parse_inst_ident >> map Class_Instance; val parse_simple_symbols = Scan.repeats1 (parse_constants || parse_type_constructors || parse_classes || 
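(* These parsers recognize symbol references as used, e.g., in the "for"
   clause of a code_module printing (hypothetical pragma):
     code_printing code_module Pervasive \<rightharpoonup> (SML) "..." for constant foo
   which restricts the include to programs that actually mention the listed
   symbols (cf. project_includes_for_syms above). *)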
parse_class_relations || parse_instances); fun parse_single_symbol_pragma parse_keyword parse_isa parse_target = parse_keyword |-- Parse.!!! (parse_isa --| (\<^keyword>\\\ || \<^keyword>\=>\) -- Parse.and_list1 (\<^keyword>\(\ |-- (Parse.name --| \<^keyword>\)\ -- Scan.option parse_target))); fun parse_symbol_pragma parse_const parse_tyco parse_class parse_classrel parse_inst parse_module = parse_single_symbol_pragma constantK Parse.term parse_const >> Constant || parse_single_symbol_pragma type_constructorK Parse.type_const parse_tyco >> Type_Constructor || parse_single_symbol_pragma type_classK Parse.class parse_class >> Type_Class || parse_single_symbol_pragma class_relationK parse_classrel_ident parse_classrel >> Class_Relation || parse_single_symbol_pragma class_instanceK parse_inst_ident parse_inst >> Class_Instance || parse_single_symbol_pragma code_moduleK Parse.name parse_module >> Module; fun parse_symbol_pragmas parse_const parse_tyco parse_class parse_classrel parse_inst parse_module = Parse.enum1 "|" (Parse.group (fn () => "code symbol pragma") (parse_symbol_pragma parse_const parse_tyco parse_class parse_classrel parse_inst parse_module)); val code_expr_argsP = Scan.optional (\<^keyword>\(\ |-- Parse.args --| \<^keyword>\)\) []; fun code_expr_inP (all_public, raw_cs) = Scan.repeat (\<^keyword>\in\ |-- Parse.!!! (Parse.name -- Scan.optional (\<^keyword>\module_name\ |-- Parse.name) "" -- Scan.option ((\<^keyword>\file_prefix\ >> K {physical = false} || \<^keyword>\file\ >> K {physical = true}) -- Parse.!!! Parse.path_input) -- code_expr_argsP)) >> (fn seri_args => export_code_cmd all_public raw_cs seri_args); fun code_expr_checkingP (all_public, raw_cs) = (\<^keyword>\checking\ |-- Parse.!!! (Scan.repeat (Parse.name -- (Scan.optional (\<^keyword>\?\ >> K false) true) -- code_expr_argsP))) >> (fn seri_args => check_code_cmd all_public raw_cs seri_args); in val _ = Outer_Syntax.command \<^command_keyword>\code_reserved\ "declare words as reserved for target language" (Parse.name -- Scan.repeat1 Parse.name >> (fn (target, reserveds) => (Toplevel.theory o fold (add_reserved target)) reserveds)); val _ = Outer_Syntax.command \<^command_keyword>\code_identifier\ "declare mandatory names for code symbols" (parse_symbol_pragmas Parse.name Parse.name Parse.name Parse.name Parse.name Parse.name >> (Toplevel.theory o fold set_identifiers_cmd)); val _ = Outer_Syntax.command \<^command_keyword>\code_printing\ "declare dedicated printing for code symbols" (parse_symbol_pragmas (Code_Printer.parse_const_syntax) (Code_Printer.parse_tyco_syntax) Parse.string (Parse.minus >> K ()) (Parse.minus >> K ()) - (Parse.text -- Scan.optional (\<^keyword>\for\ |-- parse_simple_symbols) []) + (Parse.embedded -- Scan.optional (\<^keyword>\for\ |-- parse_simple_symbols) []) >> (Toplevel.theory o fold set_printings_cmd)); val _ = Outer_Syntax.local_theory \<^command_keyword>\export_code\ "generate executable code for constants" (Scan.optional (\<^keyword>\open\ >> K true) false -- Scan.repeat1 Parse.term :|-- (fn args => (code_expr_checkingP args || code_expr_inP args))); end; local val parse_const_terms = Args.theory -- Scan.repeat1 Args.term >> uncurry (fn thy => map (Code.check_const thy)); fun parse_symbols keyword parse internalize mark_symbol = Scan.lift (keyword --| Args.colon) |-- Args.theory -- Scan.repeat1 parse >> uncurry (fn thy => map (mark_symbol o internalize thy)); val parse_consts = parse_symbols constantK Args.term Code.check_const Constant; val parse_types = parse_symbols type_constructorK 
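(* The code_stmts document antiquotation assembled from these parsers is used
   like (hypothetical examples):
     @{code_stmts fib (Haskell)}
     @{code_stmts fib constant: fib (Haskell 80)}
   i.e. first the constants whose program is generated, then optional symbol
   selections, then the target language with an optional line width. *)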
(Scan.lift Args.name) Sign.intern_type Type_Constructor; val parse_classes = parse_symbols type_classK (Scan.lift Args.name) Sign.intern_class Type_Class; val parse_instances = parse_symbols class_instanceK (Scan.lift (Args.name --| Args.$$$ "::" -- Args.name)) (fn thy => fn (raw_tyco, raw_class) => (Sign.intern_class thy raw_class, Sign.intern_type thy raw_tyco)) Class_Instance; in val _ = Theory.setup (Document_Output.antiquotation_raw \<^binding>\code_stmts\ (parse_const_terms -- Scan.repeats (parse_consts || parse_types || parse_classes || parse_instances) -- Scan.lift (Args.parens (Args.name -- Scan.option Parse.int))) (fn ctxt => fn ((consts, symbols), (target_name, some_width)) => present_code ctxt consts symbols target_name "Example" some_width [] |> trim_line |> Document_Output.verbatim (Config.put Document_Antiquotation.thy_output_display true ctxt))); end; end; (*struct*) diff --git a/src/Tools/VSCode/extension/README.md b/src/Tools/VSCode/extension/README.md --- a/src/Tools/VSCode/extension/README.md +++ b/src/Tools/VSCode/extension/README.md @@ -1,188 +1,189 @@ # Isabelle Prover IDE support This extension connects VSCode to the Isabelle Prover IDE infrastructure: it -requires Isabelle2021-1. +requires a suitable repository version of Isabelle. The implementation is centered around the VSCode Language Server protocol, but with many add-ons that are specific to VSCode and Isabelle/PIDE. See also: - * + * + * ## Screenshot ![[Isabelle/VSCode]](https://isabelle.in.tum.de/repos/isabelle/raw-file/b565a39627bb/src/Tools/VSCode/extension/isabelle_vscode.png) ## Notable Features * Static syntax tables for Isabelle `.thy` and `.ML` files. * Implicit dependency management of sources, subject to changes of theory files within the editor, as well as external file-system events. * Implicit formal checking of theory files, using the *cursor position* of the active editor panel as indication for relevant spots. * Text overview lane with formal status of prover commands (unprocessed, running, error, warning). * Prover messages within the source text (errors/warnings and information messages). * Semantic text decorations: colors for free/bound variables, inferred types etc. * Visual indication of formal scopes and hyperlinks for formal entities. * Implicit proof state output via the VSCode message channel "Isabelle Output". * Explicit proof state output via separate GUI panel (command `isabelle.state`). * HTML preview via separate GUI panel (command `isabelle.preview`). * Rich completion information: Isabelle symbols (e.g. `\forall` or `\<forall>`), outer syntax keywords, formal entities, file-system paths, BibTeX entries etc. * Spell-checking of informal texts, including dictionary operations: via the regular completion dialog. ## Requirements ### Isabelle/VSCode Installation - * Download Isabelle2021-1 from - or any of its mirror sites. + * Download a recent Isabelle development snapshot from + * Unpack and run the main Isabelle/jEdit application as usual, to ensure that the logic image is built properly and Isabelle works as expected. * Download and install VSCode from * Open the VSCode *Extensions* view and install the following: + *Isabelle* (needs to match the underlying Isabelle release). + *Prettify Symbols Mode* (important for display of Isabelle symbols). + *bibtexLanguage* (optional): it gives `.bib` a formal status, thus `@{cite}` antiquotations become active for completion and hyperlinks.
* Open the dialog *Preferences / User settings* and provide the following entries in the second window, where local user additions are stored: + On all platforms: `isabelle.home` needs to point to the main Isabelle directory (`$ISABELLE_HOME`). + On Windows: use drive-letter and backslashes for `isabelle.home` above. When running from a bare repository clone (not a development snapshot), `isabelle.cygwin_root` needs to point to a suitable Cygwin installation. Examples: + Linux: ``` - "isabelle.home": "/home/makarius/Isabelle2021-1" + "isabelle.home": "/home/makarius/Isabelle" ``` + Mac OS X: ``` - "isabelle.home": "/Users/makarius/Isabelle.app/Isabelle2021-1" + "isabelle.home": "/Users/makarius/Isabelle.app/Isabelle" ``` + Windows: ``` - "isabelle.home": "C:\\Users\\makarius\\Isabelle2021-1" + "isabelle.home": "C:\\Users\\makarius\\Isabelle" ``` * Restart the VSCode application to ensure that all extensions are properly initialized and user settings are updated. Afterwards VSCode should know about `.thy` files (Isabelle theories) and `.ML` files (Isabelle/ML modules). The Isabelle extension is initialized when the first Isabelle file is opened. It takes a few seconds to start up: a small popup window eventually says *"Welcome to Isabelle ..."*. If that fails, there might be something wrong with `isabelle.home` from above, or the Isabelle distribution does not match the version of the VSCode extension from the Marketplace. ### Support for Isabelle symbols Isabelle symbols like `\<forall>` are rendered using the extension *Prettify Symbols Mode*, which needs to be installed separately. In addition, the following user settings should be changed in the *Preferences / User settings* dialog of VSCode: ``` "prettifySymbolsMode.substitutions": [ { "language": "isabelle", "revealOn": "none", "adjustCursorMovement": true, "prettyCursor": "none", "substitutions": [] }, { "language": "isabelle-ml", "revealOn": "none", "adjustCursorMovement": true, "prettyCursor": "none", "substitutions": [] }] ``` Actual symbol replacement tables are provided by the prover process on startup, based on the usual `etc/symbols` specifications of the Isabelle installation. ### Further Preferences * Preferred Color Theme: `Light+ (default light)` * Alternative Color Theme: `Dark+ (default dark)` – with restrictions: some color combinations don't work out properly. * Recommended changes to default VSCode settings: ``` "editor.acceptSuggestionOnEnter": "off", "editor.lineNumbers": "off", "editor.renderIndentGuides": false, "editor.rulers": [80, 100], "editor.wordBasedSuggestions": true, ``` ## Known Limitations of Isabelle/VSCode * Lack of specific support for the Isabelle fonts: these need to be manually installed on the system and configured for VSCode (see also `$ISABELLE_FONTS` within the Isabelle environment). **Note:** As the Isabelle fonts continue to evolve, installed versions need to be updated according to each new Isabelle version. * Isabelle symbols are merely an optical illusion: it would be better to make them a first-class Unicode charset as in Isabelle/jEdit. * Isabelle symbol abbreviations like "-->" are not accepted by VSCode. * Lack of formal editor perspective in VSCode: only the cursor position is used (with some surrounding lines of text). * Lack of formal markup in prover messages and popups. * Lack of pretty-printing (logical line breaks) according to window and font dimensions. * Lack of GUI panels for Sledgehammer, Query operations etc.
* Big theory files may cause problems to the VSCode rendering engine, since messages and text decorations are applied to the text as a whole (cf. the minimap view). diff --git a/src/Tools/VSCode/extension/package.json b/src/Tools/VSCode/extension/package.json --- a/src/Tools/VSCode/extension/package.json +++ b/src/Tools/VSCode/extension/package.json @@ -1,555 +1,555 @@ { - "name": "Isabelle2021-1", - "displayName": "Isabelle2021-1", + "name": "Isabelle", + "displayName": "Isabelle", "description": "Isabelle Prover IDE", "keywords": [ "theorem prover", "formalized mathematics", "mathematical logic", "functional programming", "document preparation" ], "icon": "isabelle.png", "version": "1.2.2", "publisher": "makarius", "license": "MIT", "repository": { "url": "https://isabelle-dev.sketis.net" }, "engines": { "vscode": "^1.34.0" }, "categories": [ "Programming Languages" ], "activationEvents": [ "onLanguage:isabelle", "onLanguage:isabelle-ml", "onLanguage:bibtex", "onCommand:isabelle.preview", "onCommand:isabelle.preview-split" ], "main": "./out/src/extension", "contributes": { "commands": [ { "command": "isabelle.state", "title": "Show State", "category": "Isabelle" }, { "command": "isabelle.preview", "title": "Open Preview", "category": "Isabelle", "icon": { "light": "./media/Preview.svg", "dark": "./media/Preview_inverse.svg" } }, { "command": "isabelle.preview-split", "title": "Open Preview (Split Editor)", "category": "Isabelle", "icon": { "light": "./media/PreviewOnRightPane_16x.svg", "dark": "./media/PreviewOnRightPane_16x_dark.svg" } }, { "command": "isabelle.include-word", "title": "Include word", "category": "Isabelle" }, { "command": "isabelle.include-word-permanently", "title": "Include word permanently", "category": "Isabelle" }, { "command": "isabelle.exclude-word", "title": "Exclude word", "category": "Isabelle" }, { "command": "isabelle.exclude-word-permanently", "title": "Exclude word permanently", "category": "Isabelle" }, { "command": "isabelle.reset-words", "title": "Reset non-permanent words", "category": "Isabelle" } ], "menus": { "editor/title": [ { "when": "editorLangId == isabelle", "command": "isabelle.preview", "group": "navigation" }, { "when": "editorLangId == isabelle-ml", "command": "isabelle.preview", "group": "navigation" }, { "when": "editorLangId == bibtex", "command": "isabelle.preview", "group": "navigation" }, { "when": "editorLangId == isabelle", "command": "isabelle.preview-split", "group": "navigation" }, { "when": "editorLangId == isabelle-ml", "command": "isabelle.preview-split", "group": "navigation" }, { "when": "editorLangId == bibtex", "command": "isabelle.preview-split", "group": "navigation" } ], "explorer/context": [ { "when": "resourceLangId == isabelle", "command": "isabelle.preview", "group": "navigation" }, { "when": "resourceLangId == isabelle-ml", "command": "isabelle.preview", "group": "navigation" }, { "when": "resourceLangId == bibtex", "command": "isabelle.preview", "group": "navigation" } ] }, "languages": [ { "id": "isabelle", "aliases": [ "Isabelle" ], "extensions": [ ".thy" ], "configuration": "./isabelle-language.json" }, { "id": "isabelle-ml", "aliases": [ "Isabelle/ML" ], "extensions": [ ".ML", ".sml", ".sig" ], "configuration": "./isabelle-ml-language.json" } ], "grammars": [ { "language": "isabelle", "scopeName": "source.isabelle", "path": "./isabelle-grammar.json" }, { "language": "isabelle-ml", "scopeName": "source.isabelle-ml", "path": "./isabelle-ml-grammar.json" } ], "configuration": { "title": "Isabelle", "properties": 
{ "isabelle.home": { "type": "string", "default": "", "description": "Main Isabelle directory (ISABELLE_HOME)." }, "isabelle.args": { "type": "array", "items": { "type": "string" }, "default": [], "description": "Command-line arguments for isabelle vscode_server process." }, "isabelle.cygwin_root": { "type": "string", "default": "", "description": "Cygwin installation on Windows (only relevant when running directly from the Isabelle repository)." }, "isabelle.unprocessed_light_color": { "type": "string", "default": "rgba(255, 160, 160, 1.00)" }, "isabelle.unprocessed_dark_color": { "type": "string", "default": "rgba(97, 0, 97, 1.00)" }, "isabelle.unprocessed1_light_color": { "type": "string", "default": "rgba(255, 160, 160, 0.20)" }, "isabelle.unprocessed1_dark_color": { "type": "string", "default": "rgba(97, 0, 97, 0.20)" }, "isabelle.running_light_color": { "type": "string", "default": "rgba(97, 0, 97, 1.00)" }, "isabelle.running_dark_color": { "type": "string", "default": "rgba(255, 160, 160, 1.00)" }, "isabelle.running1_light_color": { "type": "string", "default": "rgba(97, 0, 97, 0.40)" }, "isabelle.running1_dark_color": { "type": "string", "default": "rgba(255, 160, 160, 0.40)" }, "isabelle.canceled_light_color": { "type": "string", "default": "rgba(255, 106, 106, 0.40)" }, "isabelle.canceled_dark_color": { "type": "string", "default": "rgba(255, 106, 106, 0.40)" }, "isabelle.bad_light_color": { "type": "string", "default": "rgba(255, 106, 106, 0.40)" }, "isabelle.bad_dark_color": { "type": "string", "default": "rgba(255, 106, 106, 0.40)" }, "isabelle.intensify_light_color": { "type": "string", "default": "rgba(255, 204, 102, 0.40)" }, "isabelle.intensify_dark_color": { "type": "string", "default": "rgba(204, 136, 0, 0.20)" }, "isabelle.markdown_bullet1_light_color": { "type": "string", "default": "rgba(218, 254, 218, 1.00)" }, "isabelle.markdown_bullet1_dark_color": { "type": "string", "default": "rgba(5, 199, 5, 0.20)" }, "isabelle.markdown_bullet2_light_color": { "type": "string", "default": "rgba(255, 240, 204, 1.00)" }, "isabelle.markdown_bullet2_dark_color": { "type": "string", "default": "rgba(204, 143, 0, 0.20)" }, "isabelle.markdown_bullet3_light_color": { "type": "string", "default": "rgba(231, 231, 255, 1.00)" }, "isabelle.markdown_bullet3_dark_color": { "type": "string", "default": "rgba(0, 0, 204, 0.20)" }, "isabelle.markdown_bullet4_light_color": { "type": "string", "default": "rgba(255, 224, 240, 1.00)" }, "isabelle.markdown_bullet4_dark_color": { "type": "string", "default": "rgba(204, 0, 105, 0.20)" }, "isabelle.quoted_light_color": { "type": "string", "default": "rgba(139, 139, 139, 0.10)" }, "isabelle.quoted_dark_color": { "type": "string", "default": "rgba(150, 150, 150, 0.15)" }, "isabelle.antiquoted_light_color": { "type": "string", "default": "rgba(255, 200, 50, 0.10)" }, "isabelle.antiquoted_dark_color": { "type": "string", "default": "rgba(255, 214, 102, 0.15)" }, "isabelle.writeln_light_color": { "type": "string", "default": "rgba(192, 192, 192, 1.0)" }, "isabelle.writeln_dark_color": { "type": "string", "default": "rgba(192, 192, 192, 1.0)" }, "isabelle.information_light_color": { "type": "string", "default": "rgba(193, 223, 238, 1.0)" }, "isabelle.information_dark_color": { "type": "string", "default": "rgba(193, 223, 238, 1.0)" }, "isabelle.warning_light_color": { "type": "string", "default": "rgba(255, 140, 0, 1.0)" }, "isabelle.warning_dark_color": { "type": "string", "default": "rgba(255, 140, 0, 1.0)" }, "isabelle.error_light_color": { "type": 
"string", "default": "rgba(178, 34, 34, 1.00)" }, "isabelle.error_dark_color": { "type": "string", "default": "rgba(178, 34, 34, 1.00)" }, "isabelle.spell_checker_light_color": { "type": "string", "default": "rgba(0, 0, 255, 1.0)" }, "isabelle.spell_checker_dark_color": { "type": "string", "default": "rgba(86, 156, 214, 1.00)" }, "isabelle.main_light_color": { "type": "string", "default": "rgba(0, 0, 0, 1.00)" }, "isabelle.main_dark_color": { "type": "string", "default": "rgba(212, 212, 212, 1.00)" }, "isabelle.keyword1_light_color": { "type": "string", "default": "rgba(175, 0, 219, 1.00)" }, "isabelle.keyword1_dark_color": { "type": "string", "default": "rgba(197, 134, 192, 1.00)" }, "isabelle.keyword2_light_color": { "type": "string", "default": "rgba(9, 136, 90, 1.00)" }, "isabelle.keyword2_dark_color": { "type": "string", "default": "rgba(181, 206, 168, 1.00)" }, "isabelle.keyword3_light_color": { "type": "string", "default": "rgba(38, 127, 153, 1.00)" }, "isabelle.keyword3_dark_color": { "type": "string", "default": "rgba(78, 201, 176), 1.00)" }, "isabelle.quasi_keyword_light_color": { "type": "string", "default": "rgba(153, 102, 255, 1.00)" }, "isabelle.quasi_keyword_dark_color": { "type": "string", "default": "rgba(153, 102, 255, 1.00)" }, "isabelle.improper_light_color": { "type": "string", "default": "rgba(205, 49, 49, 1.00)" }, "isabelle.improper_dark_color": { "type": "string", "default": "rgba(244, 71, 71, 1.00)" }, "isabelle.operator_light_color": { "type": "string", "default": "rgba(50, 50, 50, 1.00)" }, "isabelle.operator_dark_color": { "type": "string", "default": "rgba(212, 212, 212, 1.00)" }, "isabelle.tfree_light_color": { "type": "string", "default": "rgba(160, 32, 240, 1.00)" }, "isabelle.tfree_dark_color": { "type": "string", "default": "rgba(160, 32, 240, 1.00)" }, "isabelle.tvar_light_color": { "type": "string", "default": "rgba(160, 32, 240, 1.00)" }, "isabelle.tvar_dark_color": { "type": "string", "default": "rgba(160, 32, 240, 1.00)" }, "isabelle.free_light_color": { "type": "string", "default": "rgba(0, 0, 255, 1.00)" }, "isabelle.free_dark_color": { "type": "string", "default": "rgba(86, 156, 214, 1.00)" }, "isabelle.skolem_light_color": { "type": "string", "default": "rgba(210, 105, 30, 1.00)" }, "isabelle.skolem_dark_color": { "type": "string", "default": "rgba(210, 105, 30, 1.00)" }, "isabelle.bound_light_color": { "type": "string", "default": "rgba(0, 128, 0, 1.00)" }, "isabelle.bound_dark_color": { "type": "string", "default": "rgba(96, 139, 78, 1.00)" }, "isabelle.var_light_color": { "type": "string", "default": "rgba(0, 16, 128, 1.00)" }, "isabelle.var_dark_color": { "type": "string", "default": "rgba(156, 220, 254, 1.00)" }, "isabelle.inner_numeral_light_color": { "type": "string", "default": "rgba(9, 136, 90, 1.00)" }, "isabelle.inner_numeral_dark_color": { "type": "string", "default": "rgba(181, 206, 168, 1.00)" }, "isabelle.inner_quoted_light_color": { "type": "string", "default": "rgba(163, 21, 21, 1.00)" }, "isabelle.inner_quoted_dark_color": { "type": "string", "default": "rgba(206, 145, 120, 1.00)" }, "isabelle.inner_cartouche_light_color": { "type": "string", "default": "rgba(129, 31, 63, 1.00)" }, "isabelle.inner_cartouche_dark_color": { "type": "string", "default": "rgba(209, 105, 105, 1.00)" }, "isabelle.inner_comment_light_color": { "type": "string", "default": "rgba(0, 128, 0, 1.00)" }, "isabelle.inner_comment_dark_color": { "type": "string", "default": "rgba(96, 139, 78, 1.00)" }, "isabelle.comment1_light_color": { "type": "string", 
"default": "rgba(129, 31, 63, 1.00)" }, "isabelle.comment1_dark_color": { "type": "string", "default": "rgba(100, 102, 149, 1.00)" }, "isabelle.comment2_light_color": { "type": "string", "default": "rgba(209, 105, 105, 1.00)" }, "isabelle.comment2_dark_color": { "type": "string", "default": "rgba(206, 155, 120, 1.00)" }, "isabelle.comment3_light_color": { "type": "string", "default": "rgba(0, 128, 0, 1.00)" }, "isabelle.comment3_dark_color": { "type": "string", "default": "rgba(96, 139, 78, 1.00)" }, "isabelle.dynamic_light_color": { "type": "string", "default": "rgba(121, 94, 38, 1.00)" }, "isabelle.dynamic_dark_color": { "type": "string", "default": "rgba(220, 220, 170, 1.00)" }, "isabelle.class_parameter_light_color": { "type": "string", "default": "rgba(210, 105, 30, 1.00)" }, "isabelle.class_parameter_dark_color": { "type": "string", "default": "rgba(210, 105, 30, 1.00)" }, "isabelle.antiquote_light_color": { "type": "string", "default": "rgba(102, 0, 204, 1.00)" }, "isabelle.antiquote_dark_color": { "type": "string", "default": "rgba(197, 134, 192, 1.00)" }, "isabelle.raw_text_light_color": { "type": "string", "default": "rgba(102, 0, 204, 1.00)" }, "isabelle.raw_text_dark_color": { "type": "string", "default": "rgba(197, 134, 192, 1.00)" }, "isabelle.plain_text_light_color": { "type": "string", "default": "rgba(102, 0, 204, 1.00)" }, "isabelle.plain_text_dark_color": { "type": "string", "default": "rgba(197, 134, 192, 1.00)" } } } }, "scripts": { "vscode:prepublish": "tsc -p ./", "compile": "tsc -watch -p ./", "postinstall": "node ./node_modules/vscode/bin/install" }, "devDependencies": { "@types/mocha": "^2.2.48", "@types/node": "^10.11.0", "mocha": "^3.5.3", "typescript": "^3.9.9", "vscode": "^1.1.36" }, "dependencies": { "vscode-languageclient": "~5.2.1", "vscode-languageserver-types": "~3.16.0" } } diff --git a/src/Tools/jEdit/src/jedit_rendering.scala b/src/Tools/jEdit/src/jedit_rendering.scala --- a/src/Tools/jEdit/src/jedit_rendering.scala +++ b/src/Tools/jEdit/src/jedit_rendering.scala @@ -1,429 +1,428 @@ /* Title: Tools/jEdit/src/jedit_rendering.scala Author: Makarius Isabelle/jEdit-specific implementation of quasi-abstract rendering and markup interpretation. 
*/ package isabelle.jedit import isabelle._ import java.awt.Color import javax.swing.Icon import org.gjt.sp.jedit.syntax.{Token => JEditToken} import org.gjt.sp.jedit.jEdit import scala.collection.immutable.SortedMap object JEdit_Rendering { /* make rendering */ def apply(snapshot: Document.Snapshot, model: Document_Model, options: Options): JEdit_Rendering = new JEdit_Rendering(snapshot, model, options) def text(snapshot: Document.Snapshot, formatted_body: XML.Body, results: Command.Results = Command.Results.empty): (String, JEdit_Rendering) = { val command = Command.rich_text(Document_ID.make(), results, formatted_body) val snippet = snapshot.snippet(command) val model = File_Model.empty(PIDE.session) val rendering = apply(snippet, model, PIDE.options.value) (command.source, rendering) } /* popup window bounds */ def popup_bounds: Double = (PIDE.options.real("jedit_popup_bounds") max 0.2) min 0.8 /* Isabelle/Isar token markup */ private val command_style: Map[String, Byte] = { import JEditToken._ Map[String, Byte]( Keyword.THY_END -> KEYWORD2, Keyword.PRF_ASM -> KEYWORD3, Keyword.PRF_ASM_GOAL -> KEYWORD3 ).withDefaultValue(KEYWORD1) } private val token_style: Map[Token.Kind.Value, Byte] = { import JEditToken._ Map[Token.Kind.Value, Byte]( Token.Kind.KEYWORD -> KEYWORD2, Token.Kind.IDENT -> NULL, Token.Kind.LONG_IDENT -> NULL, Token.Kind.SYM_IDENT -> NULL, Token.Kind.VAR -> NULL, Token.Kind.TYPE_IDENT -> NULL, Token.Kind.TYPE_VAR -> NULL, Token.Kind.NAT -> NULL, Token.Kind.FLOAT -> NULL, Token.Kind.SPACE -> NULL, Token.Kind.STRING -> LITERAL1, Token.Kind.ALT_STRING -> LITERAL2, - Token.Kind.VERBATIM -> COMMENT3, Token.Kind.CARTOUCHE -> COMMENT3, Token.Kind.CONTROL -> COMMENT3, Token.Kind.INFORMAL_COMMENT -> COMMENT1, Token.Kind.FORMAL_COMMENT -> COMMENT1, Token.Kind.ERROR -> INVALID ).withDefaultValue(NULL) } def token_markup(syntax: Outer_Syntax, token: Token): Byte = if (token.is_command) command_style(syntax.keywords.kinds.getOrElse(token.content, "")) else if (token.is_delimiter) JEditToken.OPERATOR else token_style(token.kind) /* Isabelle/ML token markup */ private val ml_token_style: Map[ML_Lex.Kind.Value, Byte] = { import JEditToken._ Map[ML_Lex.Kind.Value, Byte]( ML_Lex.Kind.KEYWORD -> NULL, ML_Lex.Kind.IDENT -> NULL, ML_Lex.Kind.LONG_IDENT -> NULL, ML_Lex.Kind.TYPE_VAR -> NULL, ML_Lex.Kind.WORD -> DIGIT, ML_Lex.Kind.INT -> DIGIT, ML_Lex.Kind.REAL -> DIGIT, ML_Lex.Kind.CHAR -> LITERAL2, ML_Lex.Kind.STRING -> LITERAL1, ML_Lex.Kind.SPACE -> NULL, ML_Lex.Kind.INFORMAL_COMMENT -> COMMENT1, ML_Lex.Kind.FORMAL_COMMENT -> COMMENT1, ML_Lex.Kind.ANTIQ -> NULL, ML_Lex.Kind.ANTIQ_START -> LITERAL4, ML_Lex.Kind.ANTIQ_STOP -> LITERAL4, ML_Lex.Kind.ANTIQ_OTHER -> NULL, ML_Lex.Kind.ANTIQ_STRING -> NULL, ML_Lex.Kind.ANTIQ_ALT_STRING -> NULL, ML_Lex.Kind.ANTIQ_CARTOUCHE -> NULL, ML_Lex.Kind.ERROR -> INVALID ).withDefaultValue(NULL) } def ml_token_markup(token: ML_Lex.Token): Byte = if (!token.is_keyword) ml_token_style(token.kind) else if (token.is_delimiter) JEditToken.OPERATOR else if (ML_Lex.keywords2(token.source)) JEditToken.KEYWORD2 else if (ML_Lex.keywords3(token.source)) JEditToken.KEYWORD3 else JEditToken.KEYWORD1 /* markup elements */ private val indentation_elements = Markup.Elements(Markup.Command_Indent.name) private val breakpoint_elements = Markup.Elements(Markup.ML_BREAKPOINT) private val highlight_elements = Markup.Elements(Markup.EXPRESSION, Markup.CITATION, Markup.LANGUAGE, Markup.ML_TYPING, Markup.TOKEN_RANGE, Markup.ENTITY, Markup.PATH, Markup.DOC, Markup.URL, Markup.SORTING, 
Markup.TYPING, Markup.CLASS_PARAMETER, Markup.FREE, Markup.SKOLEM, Markup.BOUND, Markup.VAR, Markup.TFREE, Markup.TVAR, Markup.ML_BREAKPOINT, Markup.MARKDOWN_PARAGRAPH, Markup.MARKDOWN_ITEM, Markup.Markdown_List.name) private val hyperlink_elements = Markup.Elements(Markup.ENTITY, Markup.PATH, Markup.EXPORT_PATH, Markup.DOC, Markup.URL, Markup.POSITION, Markup.CITATION) private val gutter_elements = Markup.Elements(Markup.WRITELN, Markup.INFORMATION, Markup.WARNING, Markup.LEGACY, Markup.ERROR) private val squiggly_elements = Markup.Elements(Markup.WRITELN, Markup.INFORMATION, Markup.WARNING, Markup.LEGACY, Markup.ERROR) private val line_background_elements = Markup.Elements(Markup.WRITELN_MESSAGE, Markup.STATE_MESSAGE, Markup.INFORMATION_MESSAGE, Markup.TRACING_MESSAGE, Markup.WARNING_MESSAGE, Markup.LEGACY_MESSAGE, Markup.ERROR_MESSAGE) private val separator_elements = Markup.Elements(Markup.SEPARATOR) private val bullet_elements = Markup.Elements(Markup.BULLET, Markup.ML_BREAKPOINT) private val fold_depth_elements = Markup.Elements(Markup.TEXT_FOLD, Markup.GOAL, Markup.SUBGOAL) } class JEdit_Rendering(snapshot: Document.Snapshot, model: Document_Model, options: Options) extends Rendering(snapshot, options, PIDE.session) { override def get_text(range: Text.Range): Option[String] = model.get_text(range) /* colors */ def color(s: String): Color = if (s == "main_color") main_color else Color_Value(options.string(s)) def color(c: Rendering.Color.Value): Color = _rendering_colors(c) lazy val _rendering_colors: Map[Rendering.Color.Value, Color] = Rendering.Color.values.iterator.map(c => c -> color(c.toString + "_color")).toMap val main_color = jEdit.getColorProperty("view.fgColor") val outdated_color = color("outdated_color") val bullet_color = color("bullet_color") val tooltip_color = color("tooltip_color") val spell_checker_color = color("spell_checker_color") val entity_ref_color = color("entity_ref_color") val breakpoint_disabled_color = color("breakpoint_disabled_color") val breakpoint_enabled_color = color("breakpoint_enabled_color") val caret_debugger_color = color("caret_debugger_color") val highlight_color = color("highlight_color") val hyperlink_color = color("hyperlink_color") val active_hover_color = color("active_hover_color") val caret_invisible_color = color("caret_invisible_color") val completion_color = color("completion_color") val search_color = color("search_color") /* indentation */ def indentation(range: Text.Range): Int = snapshot.select(range, JEdit_Rendering.indentation_elements, _ => { case Text.Info(_, XML.Elem(Markup.Command_Indent(i), _)) => Some(i) case _ => None }).headOption.map(_.info).getOrElse(0) /* breakpoints */ def breakpoint(range: Text.Range): Option[(Command, Long)] = if (snapshot.is_outdated) None else snapshot.select(range, JEdit_Rendering.breakpoint_elements, command_states => { case Text.Info(_, Protocol.ML_Breakpoint(breakpoint)) => command_states match { case st :: _ => Some((st.command, breakpoint)) case _ => None } case _ => None }).headOption.map(_.info) /* caret focus */ def entity_ref(range: Text.Range, focus: Rendering.Focus): List[Text.Info[Color]] = snapshot.select(range, Rendering.entity_elements, _ => { case Text.Info(_, XML.Elem(Markup.Entity.Ref(i), _)) if focus(i) => Some(entity_ref_color) case _ => None }) /* highlighted area */ def highlight(range: Text.Range): Option[Text.Info[Color]] = snapshot.select(range, JEdit_Rendering.highlight_elements, _ => { case info => Some(Text.Info(snapshot.convert(info.range), highlight_color)) 
}).headOption.map(_.info) /* hyperlinks */ def hyperlink(range: Text.Range): Option[Text.Info[PIDE.editor.Hyperlink]] = { snapshot.cumulate[Vector[Text.Info[PIDE.editor.Hyperlink]]]( range, Vector.empty, JEdit_Rendering.hyperlink_elements, _ => { case (links, Text.Info(info_range, XML.Elem(Markup.Path(name), _))) => val file = perhaps_append_file(snapshot.node_name, name) val link = PIDE.editor.hyperlink_file(true, file) Some(links :+ Text.Info(snapshot.convert(info_range), link)) case (links, Text.Info(info_range, XML.Elem(Markup.Export_Path(name), _))) => val link = PIDE.editor.hyperlink_file(true, Isabelle_Export.vfs_prefix + name) Some(links :+ Text.Info(snapshot.convert(info_range), link)) case (links, Text.Info(info_range, XML.Elem(Markup.Doc(name), _))) => PIDE.editor.hyperlink_doc(name).map(link => (links :+ Text.Info(snapshot.convert(info_range), link))) case (links, Text.Info(info_range, XML.Elem(Markup.Url(name), _))) => val link = PIDE.editor.hyperlink_url(name) Some(links :+ Text.Info(snapshot.convert(info_range), link)) case (links, Text.Info(info_range, XML.Elem(Markup(Markup.ENTITY, props), _))) => val opt_link = PIDE.editor.hyperlink_def_position(true, snapshot, props) opt_link.map(link => links :+ Text.Info(snapshot.convert(info_range), link)) case (links, Text.Info(info_range, XML.Elem(Markup(Markup.POSITION, props), _))) => val opt_link = PIDE.editor.hyperlink_position(true, snapshot, props) opt_link.map(link => links :+ Text.Info(snapshot.convert(info_range), link)) case (links, Text.Info(info_range, XML.Elem(Markup.Citation(name), _))) => val opt_link = Document_Model.bibtex_entries_iterator().collectFirst( { case Text.Info(entry_range, (entry, model)) if entry == name => PIDE.editor.hyperlink_model(true, model, entry_range.start) }) opt_link.map(link => links :+ Text.Info(snapshot.convert(info_range), link)) case _ => None }) match { case Text.Info(_, _ :+ info) :: _ => Some(info) case _ => None } } def hyperlink_entity(range: Text.Range): Option[Text.Info[PIDE.editor.Hyperlink]] = { snapshot.cumulate[Vector[Text.Info[PIDE.editor.Hyperlink]]]( range, Vector.empty, Rendering.entity_elements, _ => { case (links, Text.Info(info_range, XML.Elem(Markup(Markup.ENTITY, props), _))) => val opt_link = PIDE.editor.hyperlink_def_position(true, snapshot, props) opt_link.map(link => links :+ Text.Info(snapshot.convert(info_range), link)) case _ => None }) match { case Text.Info(_, _ :+ info) :: _ => Some(info) case _ => None } } /* active elements */ def active(range: Text.Range): Option[Text.Info[XML.Elem]] = snapshot.select(range, Rendering.active_elements, command_states => { case Text.Info(info_range, elem) => if (elem.name == Markup.DIALOG) { elem match { case Protocol.Dialog(_, serial, _) if !command_states.exists(st => st.results.defined(serial)) => Some(Text.Info(snapshot.convert(info_range), elem)) case _ => None } } else Some(Text.Info(snapshot.convert(info_range), elem)) }).headOption.map(_.info) /* tooltips */ def tooltip_margin: Int = options.int("jedit_tooltip_margin") override def timing_threshold: Double = options.real("jedit_timing_threshold") def tooltip(range: Text.Range, control: Boolean): Option[Text.Info[XML.Body]] = { val elements = if (control) Rendering.tooltip_elements else Rendering.tooltip_message_elements tooltips(elements, range).map(info => info.map(Pretty.fbreaks)) } lazy val tooltip_close_icon: Icon = JEdit_Lib.load_icon(options.string("tooltip_close_icon")) lazy val tooltip_detach_icon: Icon = 
JEdit_Lib.load_icon(options.string("tooltip_detach_icon")) /* gutter */ private def gutter_message_pri(msg: XML.Tree): Int = if (Protocol.is_error(msg)) Rendering.error_pri else if (Protocol.is_legacy(msg)) Rendering.legacy_pri else if (Protocol.is_warning(msg)) Rendering.warning_pri else if (Protocol.is_information(msg)) Rendering.information_pri else 0 private lazy val gutter_message_content = Map( Rendering.information_pri -> (JEdit_Lib.load_icon(options.string("gutter_information_icon")), color(Rendering.Color.information_message)), Rendering.warning_pri -> (JEdit_Lib.load_icon(options.string("gutter_warning_icon")), color(Rendering.Color.warning_message)), Rendering.legacy_pri -> (JEdit_Lib.load_icon(options.string("gutter_legacy_icon")), color(Rendering.Color.legacy_message)), Rendering.error_pri -> (JEdit_Lib.load_icon(options.string("gutter_error_icon")), color(Rendering.Color.error_message))) def gutter_content(range: Text.Range): Option[(Icon, Color)] = { val pris = snapshot.cumulate[Int](range, 0, JEdit_Rendering.gutter_elements, _ => { case (pri, Text.Info(_, elem)) => Some(pri max gutter_message_pri(elem)) case _ => None }).map(_.info) gutter_message_content.get(pris.foldLeft(0)(_ max _)) } /* message output */ def squiggly_underline(range: Text.Range): List[Text.Info[Rendering.Color.Value]] = message_underline_color(JEdit_Rendering.squiggly_elements, range) def line_background(range: Text.Range): Option[(Rendering.Color.Value, Boolean)] = { val results = snapshot.cumulate[Int](range, 0, JEdit_Rendering.line_background_elements, _ => { case (pri, Text.Info(_, elem)) => Some(pri max Rendering.message_pri(elem.name)) }) val pri = results.foldLeft(0) { case (p1, Text.Info(_, p2)) => p1 max p2 } Rendering.message_background_color.get(pri).map(message_color => { val is_separator = snapshot.cumulate[Boolean](range, false, JEdit_Rendering.separator_elements, _ => { case _ => Some(true) }).exists(_.info) (message_color, is_separator) }) } /* text color */ def text_color(range: Text.Range, current_color: Color): List[Text.Info[Color]] = { if (current_color == Syntax_Style.hidden_color) List(Text.Info(range, current_color)) else snapshot.cumulate(range, current_color, Rendering.text_color_elements, _ => { case (_, Text.Info(_, elem)) => Rendering.text_color.get(elem.name).map(color) }) } /* virtual bullets */ def bullet(range: Text.Range): List[Text.Info[Color]] = snapshot.select(range, JEdit_Rendering.bullet_elements, _ => { case Text.Info(_, Protocol.ML_Breakpoint(breakpoint)) => PIDE.session.debugger.active_breakpoint_state(breakpoint).map(b => if (b) breakpoint_enabled_color else breakpoint_disabled_color) case _ => Some(bullet_color) }) /* text folds */ def fold_depth(range: Text.Range): List[Text.Info[Int]] = snapshot.cumulate[Int](range, 0, JEdit_Rendering.fold_depth_elements, _ => { case (depth, _) => Some(depth + 1) }) }
  • """.r }) def isabelle_dev: Archive = - archive(Url("https://mailmanbroy.in.tum.de/pipermail/isabelle-dev")) + archive(Url("https://mailmanbroy.in.tum.de/pipermail/isabelle-dev"), + new Msg_Format { val regex: Regex = """