From 8c2a19dbf52c13f8bfc9356740ad4687efa0318e Mon Sep 17 00:00:00 2001 From: Julien Moutinho Date: Wed, 15 May 2024 18:55:45 +0200 Subject: [PATCH 1/1] init --- .chglog/CHANGELOG.tpl.md | 37 + .chglog/config.yml | 35 + .envrc | 1 + .gitignore | 24 + .hlint.yaml | 30 + .reuse/dep5 | 12 + ChangeLog.md | 3 + LICENSES/AGPL-3.0-or-later.txt | 235 ++++ LICENSES/BSD-3-Clause.txt | 11 + LICENSES/CC0-1.0.txt | 121 ++ Makefile | 93 ++ Readme.md | 52 + flake.lock | 282 +++++ flake.nix | 79 ++ fourmolu.yaml | 52 + literate-phylomemy.cabal | 197 +++ src/Clustering/FrequentItemSet/BruteForce.hs | 204 ++++ src/Clustering/FrequentItemSet/LCM.hs | 676 +++++++++++ src/Clustering/FrequentItemSet/References.hs | 31 + src/Clustering/UnionFind/ST.hs | 198 +++ src/Numeric/Probability.hs | 113 ++ src/Phylomemy.hs | 13 + src/Phylomemy/DOT.hs | 222 ++++ src/Phylomemy/Indexation.hs | 157 +++ src/Phylomemy/References.hs | 34 + src/Phylomemy/Similarity.hs | 129 ++ src/Phylomemy/TemporalMatching.hs | 343 ++++++ src/Prelude.hs | 32 + .../Clustering/FrequentItemSet/AprioriSpec.hs | 83 ++ .../db=1.minSupp=1.minSize=1.golden | 26 + .../db=1.minSupp=1.minSize=2.golden | 34 + ...b=HAL03500847T2.minSupp=1.minSize=2.golden | 212 ++++ ...b=HAL03500847T2.minSupp=1.minSize=3.golden | 90 ++ ...b=HAL03500847T2.minSupp=1.minSize=4.golden | 18 + ...b=HAL03500847T2.minSupp=2.minSize=2.golden | 14 + ...b=HAL03500847T2.minSupp=2.minSize=3.golden | 1 + ...b=HAL03500847T2.minSupp=2.minSize=4.golden | 1 + ...b=HAL03500847T2.minSupp=3.minSize=2.golden | 1 + ...b=HAL03500847T2.minSupp=3.minSize=3.golden | 1 + ...b=HAL03500847T2.minSupp=3.minSize=4.golden | 1 + .../db=TakeakiUno.minSupp=2.minSize=2.golden | 152 +++ .../db=TakeakiUno.minSupp=2.minSize=3.golden | 42 + .../db=TakeakiUno.minSupp=3.minSize=2.golden | 104 ++ .../db=TakeakiUno.minSupp=3.minSize=3.golden | 18 + .../db=HAL03500847T2.minSupp=2.golden | 6 + .../db=HAL03500847T2.minSupp=3.golden | 1 + .../db=TakeakiUno.minSupp=2.golden | 17 + .../db=TakeakiUno.minSupp=3.golden | 12 + .../associationRules/TakeakiUno.golden.old | 1074 +++++++++++++++++ ...db=TakeakiUno.minSupp=2.minConf=75%.golden | 67 + ...db=TakeakiUno.minSupp=3.minConf=75%.golden | 52 + .../FrequentItemSet/BruteForceSpec.hs | 113 ++ .../db=1.minSupp=1.minSize=1.golden | 1 + .../db=1.minSupp=1.minSize=2.golden | 1 + .../db=1.minSupp=2.minSize=1.golden | 1 + .../db=1.minSupp=2.minSize=2.golden | 1 + .../db=2.minSupp=1.minSize=1.golden | 3 + .../db=2.minSupp=1.minSize=2.golden | 3 + .../db=2.minSupp=2.minSize=1.golden | 1 + .../db=2.minSupp=2.minSize=2.golden | 1 + .../db=3.minSupp=1.minSize=1.golden | 4 + .../db=3.minSupp=1.minSize=2.golden | 3 + .../db=3.minSupp=2.minSize=1.golden | 1 + .../db=3.minSupp=2.minSize=2.golden | 1 + .../db=4.minSupp=1.minSize=1.golden | 12 + .../db=4.minSupp=1.minSize=2.golden | 9 + .../db=4.minSupp=2.minSize=1.golden | 11 + .../db=4.minSupp=2.minSize=2.golden | 8 + tests/Clustering/FrequentItemSet/LCMSpec.hs | 149 +++ tests/Phylomemy/IndexationSpec.hs | 63 + tests/Phylomemy/SimilaritySpec.hs | 36 + tests/Phylomemy/TemporalMatchingSpec.hs | 161 +++ ...cs0.minSupp=1.minSize=2.lambda=0.00.golden | 161 +++ ...cs0.minSupp=1.minSize=2.lambda=0.30.golden | 180 +++ ...cs0.minSupp=1.minSize=2.lambda=1.00.golden | 180 +++ tests/Spec.hs | 21 + tests/Utils.hs | 50 + 77 files changed, 6618 insertions(+) create mode 100755 .chglog/CHANGELOG.tpl.md create mode 100755 .chglog/config.yml create mode 100644 .envrc create mode 100644 .gitignore create mode 100644 .hlint.yaml create mode 100644 .reuse/dep5 
create mode 100644 ChangeLog.md create mode 100644 LICENSES/AGPL-3.0-or-later.txt create mode 100644 LICENSES/BSD-3-Clause.txt create mode 100644 LICENSES/CC0-1.0.txt create mode 100644 Makefile create mode 100644 Readme.md create mode 100644 flake.lock create mode 100644 flake.nix create mode 100644 fourmolu.yaml create mode 100644 literate-phylomemy.cabal create mode 100644 src/Clustering/FrequentItemSet/BruteForce.hs create mode 100644 src/Clustering/FrequentItemSet/LCM.hs create mode 100644 src/Clustering/FrequentItemSet/References.hs create mode 100644 src/Clustering/UnionFind/ST.hs create mode 100644 src/Numeric/Probability.hs create mode 100644 src/Phylomemy.hs create mode 100644 src/Phylomemy/DOT.hs create mode 100644 src/Phylomemy/Indexation.hs create mode 100644 src/Phylomemy/References.hs create mode 100644 src/Phylomemy/Similarity.hs create mode 100644 src/Phylomemy/TemporalMatching.hs create mode 100644 src/Prelude.hs create mode 100644 tests/Clustering/FrequentItemSet/AprioriSpec.hs create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=4.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=4.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=4.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=2.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=3.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/associationRules/TakeakiUno.golden.old create mode 100644 
tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=2.minConf=75%.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=3.minConf=75%.golden create mode 100644 tests/Clustering/FrequentItemSet/BruteForceSpec.hs create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=1.golden create mode 100644 tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=2.golden create mode 100644 tests/Clustering/FrequentItemSet/LCMSpec.hs create mode 100644 tests/Phylomemy/IndexationSpec.hs create mode 100644 tests/Phylomemy/SimilaritySpec.hs create mode 100644 tests/Phylomemy/TemporalMatchingSpec.hs create mode 100644 tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.00.golden create mode 100644 tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.30.golden create mode 100644 tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=1.00.golden create mode 100644 tests/Spec.hs create mode 100644 tests/Utils.hs diff --git a/.chglog/CHANGELOG.tpl.md b/.chglog/CHANGELOG.tpl.md new file mode 100755 index 0000000..07711aa --- /dev/null +++ b/.chglog/CHANGELOG.tpl.md @@ -0,0 +1,37 @@ +{{ range .Versions }} +## {{ .Tag.Name }} ({{ datetime "2006-01-02" .Tag.Date }}) + +{{ range .CommitGroups -}} +### {{ .Title }} + +{{ range .Commits -}} +* {{ upperFirst .Type }}{{ if .Subject }} {{ .Subject }}{{ end }}. 
+{{ end }} +{{ end -}} + +{{- if .RevertCommits -}} +### Reverts + +{{ range .RevertCommits -}} +* {{ .Revert.Header }} +{{ end }} +{{ end -}} + +{{- if .MergeCommits -}} +### Merges + +{{ range .MergeCommits -}} +* {{ .Header }} +{{ end }} +{{ end -}} + +{{- if .NoteGroups -}} +{{ range .NoteGroups -}} +### {{ .Title }} + +{{ range .Notes }} +{{ .Body }} +{{ end }} +{{ end -}} +{{ end -}} +{{ end -}} diff --git a/.chglog/config.yml b/.chglog/config.yml new file mode 100755 index 0000000..ba783ab --- /dev/null +++ b/.chglog/config.yml @@ -0,0 +1,35 @@ +style: none +template: CHANGELOG.tpl.md +info: + title: ChangeLog + repository_url: "https://git.sourcephile.fr/literate-phylomemy" +options: + sort: "date" + + commits: + sort_by: Type + + commit_groups: + group_by: Scope + sort_by: Custom + title_order: + - iface + - doc + - impl + - build + title_maps: + build: Build + doc: Documentation + iface: Interface + impl: Implementation + + header: + pattern: "^([\\w\\$\\.\\-\\*\\s]*)\\:\\s(\\w*)\\s*(.*)$" + pattern_maps: + - Scope + - Type + - Subject + + notes: + keywords: + - BREAKING CHANGE diff --git a/.envrc b/.envrc new file mode 100644 index 0000000..3550a30 --- /dev/null +++ b/.envrc @@ -0,0 +1 @@ +use flake diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..624ec5a --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +*.actual.* +*.eventlog +*.eventlog.html +*.eventlog.json +*.hi +*.hp +*.o +*.orig +*.prof +*.root +.direnv/ +.ghc.environment.* +.pre-commit-config.yaml +.stack-work/ +cabal.project.local +core +dist-newstyle/ +dist/ +dump-core/ +hlint.html +old/ +result* +sydtest-profile.html +.gitsigners diff --git a/.hlint.yaml b/.hlint.yaml new file mode 100644 index 0000000..3e69552 --- /dev/null +++ b/.hlint.yaml @@ -0,0 +1,30 @@ +- extensions: + - name: Haskell2010 + - name: NoCPP + - name: TypeApplications + +- ignore: {name: Avoid lambda} +- ignore: {name: Avoid lambda using `infix`} +- ignore: {name: Move brackets to avoid $} +- ignore: {name: Reduce duplication} +- ignore: {name: Redundant $} +- ignore: {name: Redundant bracket} +- ignore: {name: Redundant do} +- ignore: {name: Redundant lambda} +- ignore: {name: Use ++} +- ignore: {name: Use camelCase} +- ignore: {name: Use const} +- ignore: {name: Use fmap} +- ignore: {name: Use guards} +- ignore: {name: Use id} +- ignore: {name: Use if} +- ignore: {name: Use import/export shortcut} +- ignore: {name: Use infix} +- ignore: {name: Use list literal pattern} +- ignore: {name: Use list literal} + +# BEGIN: generated hints +- fixity: "infixr 0 :->" +- fixity: "infixr 0 :=" +- fixity: "infixr 9 >.>" +# END: generated hints diff --git a/.reuse/dep5 b/.reuse/dep5 new file mode 100644 index 0000000..488cf96 --- /dev/null +++ b/.reuse/dep5 @@ -0,0 +1,12 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: literate-phylomemy +Upstream-Contact: Julien Moutinho +Source: https://git.sourcephile.fr/literate-phylomemy + +Files: *.nix *.lock cabal.project *.cabal *.md .chglog/* .envrc fourmolu.yaml .gitignore .hlint.yaml Makefile tests/* +Copyright: Julien Moutinho +License: CC0-1.0 + +Files: libs/* src/* +Copyright: Julien Moutinho +License: AGPL-3.0-or-later diff --git a/ChangeLog.md b/ChangeLog.md new file mode 100644 index 0000000..dd50ccc --- /dev/null +++ b/ChangeLog.md @@ -0,0 +1,3 @@ +## literate-phylomemy-0.0.0.20240613 (2024-06-13) + +- Initial version diff --git a/LICENSES/AGPL-3.0-or-later.txt b/LICENSES/AGPL-3.0-or-later.txt new file mode 100644 index 0000000..0c97efd --- 
/dev/null +++ b/LICENSES/AGPL-3.0-or-later.txt @@ -0,0 +1,235 @@ +GNU AFFERO GENERAL PUBLIC LICENSE +Version 3, 19 November 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. + + Preamble + +The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. + +The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. + +Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. + +A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. + +The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. + +An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. + +The precise terms and conditions for copying, distribution and modification follow. + + TERMS AND CONDITIONS + +0. Definitions. + +"This License" refers to version 3 of the GNU Affero General Public License. + +"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. 
+ +A "covered work" means either the unmodified Program or a work based on the Program. + +To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. + +An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. + +1. Source Code. +The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. + +A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. 
This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. + +4. Conveying Verbatim Copies. +You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. + +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". + + c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. 
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. + +A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. + +6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: + + a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. + + d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
+ +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. + +"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. + +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. + +7. Additional Terms. +"Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
+ +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or authors of the material; or + + e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. + +All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. + +8. Termination. + +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. 
+ +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. + +9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. + +10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. + +11. Patents. + +A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. 
+ +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. + +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. + +12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may +not convey it at all. 
For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. + +13. Remote Network Interaction; Use with the GNU General Public License. + +Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. + +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. + +14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. + +If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. + +15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +16. Limitation of Liability. 
+ +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +17. Interpretation of Sections 15 and 16. + +If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. + + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + +If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. + +You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see . diff --git a/LICENSES/BSD-3-Clause.txt b/LICENSES/BSD-3-Clause.txt new file mode 100644 index 0000000..ea890af --- /dev/null +++ b/LICENSES/BSD-3-Clause.txt @@ -0,0 +1,11 @@ +Copyright (c) . + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/LICENSES/CC0-1.0.txt b/LICENSES/CC0-1.0.txt new file mode 100644 index 0000000..0e259d4 --- /dev/null +++ b/LICENSES/CC0-1.0.txt @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). 
Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. 
Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4b0b644 --- /dev/null +++ b/Makefile @@ -0,0 +1,93 @@ +cabal := $(wildcard *.cabal) +package := $(notdir ./$(cabal:.cabal=)) +version := $(shell sed -ne 's/^version: *\(.*\)/\1/p' $(cabal)) +project := $(patsubst %.cabal,%,$(cabal)) +cabal_builddir ?= dist-newstyle +sourceDirs := $(wildcard src tests) + +override REPL_OPTIONS += -ignore-dot-ghci +override GHCID_OPTIONS += --no-height-limit --reverse-errors --color=always --restart $(cabal) --warning + +all: build +build: + cabal build $(CABAL_BUILD_FLAGS) +clean c: + cabal clean +repl: + cabal repl $(CABAL_REPL_FLAGS) $(project) +ghcid: + ghcid $(GHCID_OPTIONS) --command 'cabal repl -fno-code $(CABAL_REPL_FLAGS) $(project) $(addprefix --repl-options ,$(REPL_OPTIONS))' +ghciwatch: + ghciwatch --watch src + +.PHONY: tests +t tests: + cabal test $(CABAL_TEST_FLAGS) \ + --test-show-details always --test-options "$(TEST_OPTIONS)" +t/repl tests/repl: + cabal repl -fno-code $(CABAL_REPL_FLAGS) $(CABAL_TEST_FLAGS) $(project)-tests +t/ghcid tests/ghcid: + ghcid $(GHCID_OPTIONS) --command 'cabal repl -fno-code $(CABAL_REPL_FLAGS) $(project) $(addprefix --repl-options ,$(REPL_OPTIONS))' \ + --run=':! ghcid $(GHCID_OPTIONS) --command "cabal repl -fno-code $(CABAL_REPL_FLAGS) $(CABAL_TEST_FLAGS) $(project)-tests" --test ":main $(TEST_OPTIONS)"' + +%/accept: TEST_OPTIONS += --golden-start +%/accept: % + + +%/reset: TEST_OPTIONS += --golden-reset +%/reset: % + + +doc: + cabal haddock --haddock-css ocean --haddock-hyperlink-source + +.PHONY: ChangeLog.md +ChangeLog.md: + ! 
git tag --merged | grep -Fqx $(package)-$(version) + git diff --exit-code + git tag -f $(package)-$(version) + git-chglog --output $@.new --tag-filter-pattern '$(package)-.*' $(package)-$(version) + touch $@ + cat $@ >>$@.new + mv -f $@.new $@ + git tag -d $(package)-$(version) + git add '$@' + git commit -m 'doc: update `$@`' +tag: build ChangeLog.md + git tag -s -m $(package)-$(version) $(package)-$(version) + +tar: + git diff --exit-code + reuse lint + cabal sdist + cabal haddock --haddock-for-hackage --enable-doc +upload: LANG=C +upload: tar + git tag --merged | grep -Fqx $(package)-$(version) + git push --follow-tags $(GIT_PUSH_FLAGS) + cabal upload $(CABAL_UPLOAD_FLAGS) "$(cabal_builddir)"/sdist/$(package)-$(version).tar.gz + cabal upload $(CABAL_UPLOAD_FLAGS) --documentation "$(cabal_builddir)"/$(package)-$(version)-docs.tar.gz +%/publish: CABAL_UPLOAD_FLAGS+=--publish +%/publish: % + +publish: upload/publish + +.PHONY: .hlint.yaml +.hlint.yaml: + sed -i -e '/^# BEGIN: generated hints/,/^# END: generated hints/d' $@ + echo >>$@ '# BEGIN: generated hints' + find $(sourceDirs) -name "*.hs" | xargs -P $(shell nproc) -I {} \ + hlint --find {} | grep -- '- fixity:' | sort -u >>$@ + echo >>$@ '# END: generated hints' +lint: .hlint.yaml + if hlint --quiet --report=hlint.html $(HLINT_FLAGS) $(sourceDirs); \ + then rm -f hlint.html; \ + else sensible-browser ./hlint.html & fi +lint/apply: .hlint.yaml + find $(sourceDirs) -name "*.hs" | xargs -P $(shell nproc) -I {} \ + hlint --refactor-options="-i" --refactor {} + +style: + find $(sourceDirs) -name "*.hs" | xargs -P $(shell nproc) -I {} \ + fourmolu -q -o -XImportQualifiedPost --mode inplace {} + cabal-fmt -i *.cabal diff --git a/Readme.md b/Readme.md new file mode 100644 index 0000000..68bf514 --- /dev/null +++ b/Readme.md @@ -0,0 +1,52 @@ +## Description + +A phylomemetic network (or phylomemy) is an adaptation +of the concept of the phylogenetic tree, +combined with Richard Dawkins' intuition of a meme, +to describe the complex dynamic structure of transformation of relations between terms. + +This package is a partial implementation of some +noteworthy algorithms composed to compute a phylomemy, +in order to understand and test them. + +## Clustering + +### Linear time Closed itemset Miner (LCM) + +Based upon: +- « HLCM: a first experiment on parallel data mining with Haskell ». + By Alexandre Termier & Benjamin Négrevergne & Simon Marlow & Satnam Singh + + From the original LCM algorithm from Takaki Uno and Hiroki Arimura. + + +### Maximal clique +TODO + +## Temporal matching + +#### Maximal spanning forest + +> If the order in which edges will be deleted is known ahead of time, then we +> can solve the dynamic connectivity problem in time `O(log n)` per query. If +> we can maintain a maximum spanning forest where edges are ordered by their +> deletion time, we know that when we delete some edge that is in the forest, +> there is no possible edge that can replace it. If there were some edge that +> connects the same two components the deleted edge does, then this other edge +> would have been part of the maximum spanning forest instead of the edge we +> deleted. This makes the delete operation trivial: we simply need to split the +> tree into its two parts if the edge to delete is part of our forest, or +> ignore the operation otherwise. +https://en.wikipedia.org/wiki/Dynamic_connectivity#Offline_dynamic_connectivity + +## Acknowledgements + +Based upon: + +- Chavalarias D & Cointet J-P (2013). 
+ « Phylomemetic Patterns in Science Evolution—The Rise and Fall of Scientific Fields ». + PLoS ONE 8(2): e54847. + +- Chavalarias, D. & Lobbé, Q. & Delanoë, A. (2021). + « Draw me Science: Multi-level and multi-scale reconstruction of knowledge dynamics with phylomemies ». + Scientometrics. diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000..834027c --- /dev/null +++ b/flake.lock @@ -0,0 +1,282 @@ +{ + "nodes": { + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "git-hooks": { + "inputs": { + "flake-compat": "flake-compat", + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": [ + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1715609711, + "narHash": "sha256-/5u29K0c+4jyQ8x7dUIEUWlz2BoTSZWUP2quPwFCE7M=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "c182c876690380f8d3b9557c4609472ebfa1b141", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "git-hooks_2": { + "inputs": { + "flake-compat": "flake-compat_2", + "flake-utils": "flake-utils_2", + "gitignore": "gitignore_2", + "nixpkgs": [ + "logic", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable_2" + }, + "locked": { + "lastModified": 1715609711, + "narHash": "sha256-/5u29K0c+4jyQ8x7dUIEUWlz2BoTSZWUP2quPwFCE7M=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "c182c876690380f8d3b9557c4609472ebfa1b141", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "git-hooks.nix", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "git-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "gitignore_2": { + "inputs": { + "nixpkgs": [ + "logic", + "git-hooks", + "nixpkgs" + ] + }, + 
"locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "logic": { + "inputs": { + "git-hooks": "git-hooks_2", + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1719224209, + "narHash": "sha256-9wLe/nHEJ55MP7j/xFeLbkYUIJDrQ5MHfw2lZn7ynfQ=", + "ref": "refs/heads/main", + "rev": "68250d2551950b4ac2a76a31e3e8f0274fec5040", + "revCount": 1, + "type": "git", + "url": "https://radicle-mermet.sourcephile.fr/z3795BqJN8hSMGkyAUr8hHviEEi2H.git" + }, + "original": { + "type": "git", + "url": "https://radicle-mermet.sourcephile.fr/z3795BqJN8hSMGkyAUr8hHviEEi2H.git" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1716793392, + "narHash": "sha256-ex3nO87EEQhshXd19QSVW5UIXL0pbPuew4q8TdEJQBY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "67a8b308bae9c26be660ccceff3e53a65e01afe1", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-stable_2": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1727264057, + "narHash": "sha256-KQPI8CTTnB9CrJ7LrmLC4VWbKZfljEPBXOFGZFRpxao=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "759537f06e6999e141588ff1c9be7f3a5c060106", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "root": { + "inputs": { + "git-hooks": "git-hooks", + "logic": "logic", + "nixpkgs": "nixpkgs_2" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000..60d83c7 --- /dev/null +++ b/flake.nix @@ -0,0 +1,79 @@ +{ + inputs = { + nixpkgs.url = "flake:nixpkgs"; + git-hooks.url = "github:cachix/git-hooks.nix"; + git-hooks.inputs.nixpkgs.follows = "nixpkgs"; + logic.url = "git+https://radicle-mermet.sourcephile.fr/z3795BqJN8hSMGkyAUr8hHviEEi2H.git"; + }; + outputs = inputs: + let + pkg = "literate-phylomemy"; + lib = inputs.nixpkgs.lib; + fileInputs = with lib.fileset; toSource { + root = ./.; 
+ fileset = unions [ + ./literate-phylomemy.cabal + ./ChangeLog.md + ./Readme.md + ./LICENSES + (fileFilter (file: lib.any file.hasExt [ "hs" ]) ./src) + (fileFilter (file: lib.any file.hasExt [ "hs" "golden" ]) ./tests) + ]; + }; + perSystem = f: lib.genAttrs lib.systems.flakeExposed (system: f rec { + inherit system; + pkgs = inputs.nixpkgs.legacyPackages.${system}; + haskellPackages = pkgs.haskellPackages.extend (with pkgs.haskell.lib; finalHaskellPkgs: previousHaskellPkgs: { + ${pkg} = buildFromSdist (finalHaskellPkgs.callCabal2nix pkg fileInputs { }); + logic = finalHaskellPkgs.callCabal2nix "logic" inputs.logic { }; + #union-find = doJailbreak (finalHaskellPkgs.callHackage "union-find" "0.2" { }); + #disjoint-containers = unmarkBroken previousHaskellPkgs.disjoint-containers; + #splaytree = doJailbreak (unmarkBroken previousHaskellPkgs.splaytree); + }); + }); + in + rec { + # nix -L build + packages = perSystem ({ haskellPackages, ... }: { + default = haskellPackages.${pkg}; + }); + # nix -L develop or direnv allow + devShells = perSystem ({ pkgs, haskellPackages, system, ... }: { + default = + haskellPackages.shellFor { + packages = ps: [ ps.${pkg} ]; + nativeBuildInputs = [ + haskellPackages.cabal-fmt + haskellPackages.cabal-install + haskellPackages.ghcid + haskellPackages.haskell-language-server + haskellPackages.hlint + pkgs.ghciwatch + pkgs.git-chglog + pkgs.reuse + pkgs.xdot + ]; + withHoogle = false; + inherit (checks.${system}.git-hooks-check) shellHook; + }; + }); + # nix flake check + checks = perSystem (args: with args; { + git-hooks-check = inputs.git-hooks.lib.${system}.run { + src = ./.; + hooks = { + cabal-fmt.enable = true; + fourmolu.enable = true; + hlint.enable = true; + nixpkgs-fmt.enable = true; + ormolu.settings.cabalDefaultExtensions = true; + reuse = { + enable = true; + entry = "${pkgs.reuse}/bin/reuse lint"; + pass_filenames = false; + }; + }; + }; + }); + }; +} diff --git a/fourmolu.yaml b/fourmolu.yaml new file mode 100644 index 0000000..fc4720c --- /dev/null +++ b/fourmolu.yaml @@ -0,0 +1,52 @@ +# Number of spaces per indentation step +indentation: 2 + +# Max line length for automatic line breaking +column-limit: none + +# Styling of arrows in type signatures (choices: trailing, leading, or leading-args) +function-arrows: trailing + +# How to place commas in multi-line lists, records, etc. 
(choices: leading or trailing) +comma-style: leading + +# Styling of import/export lists (choices: leading, trailing, or diff-friendly) +import-export-style: diff-friendly + +# Whether to full-indent or half-indent 'where' bindings past the preceding body +indent-wheres: true + +# Whether to leave a space before an opening record brace +record-brace-space: false + +# Number of spaces between top-level declarations +newlines-between-decls: 1 + +# How to print Haddock comments (choices: single-line, multi-line, or multi-line-compact) +haddock-style: single-line + +# How to print module docstring +haddock-style-module: null + +# Styling of let blocks (choices: auto, inline, newline, or mixed) +let-style: mixed + +# How to align the 'in' keyword with respect to the 'let' keyword (choices: left-align, right-align, or no-space) +in-style: no-space + +# Whether to put parentheses around a single constraint (choices: auto, always, or never) +single-constraint-parens: never + +# Output Unicode syntax (choices: detect, always, or never) +unicode: never + +# Give the programmer more choice on where to insert blank lines +respectful: true + +# Fixity information for operators +fixities: + - infixr 0 ... + - "infixr 0 :::" + +# Module reexports Fourmolu should know about +reexports: [] diff --git a/literate-phylomemy.cabal b/literate-phylomemy.cabal new file mode 100644 index 0000000..acf4288 --- /dev/null +++ b/literate-phylomemy.cabal @@ -0,0 +1,197 @@ +cabal-version: 3.0 +name: literate-phylomemy +maintainer: mailto:literate-phylomemy@sourcephile.fr +bug-reports: + https://radicle.sourcephile.fr/nodes/radicle-mermet.sourcephile.fr/rad:z2364hmzZUAGy1nKdSFa1gLSoUE2M/issues + +homepage: + https://radicle.sourcephile.fr/nodes/radicle-mermet.sourcephile.fr/rad:z2364hmzZUAGy1nKdSFa1gLSoUE2M + +author: Julien Moutinho +copyright: Julien Moutinho +license: AGPL-3.0-or-later +license-file: LICENSES/AGPL-3.0-or-later.txt + +-- PVP: +-+------- breaking API changes +-- | | +----- non-breaking API additions +-- | | | +--- code changes with no API change +version: 0.0.0.20240619 +stability: experimental +category: Data mining +synopsis: Phylomemetic network algorithms +description: + A phylomemetic network (or phylomemy) is an adaptation + of the concept of the phylogenetic tree, + combined with Richard Dawkins' intuition of a meme, + to describe the complex dynamic structure of transformation + of relations between terms. + + Based upon: + + * Chavalarias D, Cointet J-P (2013) Phylomemetic Patterns in Science Evolution—The Rise and Fall of Scientific Fields. PLoS ONE 8(2): e54847. + + * Chavalarias, D., Lobbé, Q., Delanoë, A., 2021. Draw me Science: Multi-level and multi-scale reconstruction of knowledge dynamics with phylomemies. Scientometrics. + +build-type: Simple +tested-with: GHC ==9.6.5 +extra-doc-files: + ChangeLog.md + Readme.md + +-- :r! 
find tests -name "*.golden" | sort +extra-source-files: + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=1.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=3.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=4.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=3.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=4.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=3.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=4.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=3.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=3.golden + tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=3.golden + tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=2.golden + tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=3.golden + tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=2.minConf=75%.golden + tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=3.minConf=75%.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=2.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=2.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=2.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=2.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=2.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=2.golden + 
tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=2.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=1.golden + tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=2.golden + tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.00.golden + tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.30.golden + tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=1.00.golden + +extra-tmp-files: + +source-repository head + type: git + location: + https://radicle-mermet.sourcephile.fr/z2364hmzZUAGy1nKdSFa1gLSoUE2M.git + +source-repository head + type: rad + location: rad://z2364hmzZUAGy1nKdSFa1gLSoUE2M + +common haskell-variant + default-language: Haskell2010 + default-extensions: + BlockArguments + DefaultSignatures + DeriveGeneric + DerivingStrategies + FlexibleContexts + FlexibleInstances + GeneralizedNewtypeDeriving + ImportQualifiedPost + LambdaCase + MultiParamTypeClasses + NamedFieldPuns + NoImplicitPrelude + PatternSynonyms + RecordWildCards + RoleAnnotations + ScopedTypeVariables + TupleSections + TypeApplications + TypeFamilies + TypeOperators + ViewPatterns + + ghc-options: + -Wall -Wincomplete-uni-patterns -Wincomplete-record-updates + -Wpartial-fields -fprint-potential-instances + +common library-deps + build-depends: + , array + , base >=4.10 && <5 + , bytestring + , containers + , deepseq + , hashable + , logic + , parallel + , pretty-simple + , random + , safe-decimal + , scientific + , text + , text-short + , time + , transformers + , unordered-containers + , validity + , validity-containers + , validity-time + , vector + +library + import: haskell-variant, library-deps + hs-source-dirs: src + exposed-modules: + Clustering.FrequentItemSet.BruteForce + Clustering.FrequentItemSet.LCM + Clustering.FrequentItemSet.References + Clustering.UnionFind.ST + Numeric.Probability + Phylomemy + Phylomemy.DOT + Phylomemy.Indexation + Phylomemy.References + Phylomemy.Similarity + Phylomemy.TemporalMatching + + other-modules: Prelude + build-depends: base >=4.10 && <5 + +test-suite literate-phylomemy-tests + import: haskell-variant, library-deps + type: exitcode-stdio-1.0 + hs-source-dirs: tests + main-is: Spec.hs + + -- build-tool-depends: sydtest-discover:sydtest-discover + ghc-options: -threaded -rtsopts -with-rtsopts=-N + other-modules: + Clustering.FrequentItemSet.BruteForceSpec + Clustering.FrequentItemSet.LCMSpec + Paths_literate_phylomemy + Phylomemy.IndexationSpec + Phylomemy.SimilaritySpec + Phylomemy.TemporalMatchingSpec + Utils + + autogen-modules: Paths_literate_phylomemy + build-depends: + , filepath + , genvalidity + , genvalidity-containers + , genvalidity-sydtest + , genvalidity-text + , genvalidity-time + , literate-phylomemy + , relude + , sydtest + , validity + , validity-containers + , validity-text diff --git a/src/Clustering/FrequentItemSet/BruteForce.hs b/src/Clustering/FrequentItemSet/BruteForce.hs new file mode 100644 index 0000000..d78a8fc --- /dev/null +++ b/src/Clustering/FrequentItemSet/BruteForce.hs @@ -0,0 +1,204 @@ +{-# LANGUAGE RankNTypes #-} +{-# OPTIONS_GHC -Wno-deprecations #-} + +-- | Brute-force algorithms related to mining frequent item sets. 
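--
-- A minimal illustration (hypothetical data): with transactions
-- {1,2,3}, {1,2} and {2,4}, and a minimum support of 2, the frequent
-- item sets include {1} (support 2), {2} (support 3) and {1,2} (support 2),
-- whereas {3}, {4} and {1,2,3} only reach support 1.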
+-- +-- Definition: in `Clustering.FrequentItemSet.References.RefAgrawalSrikantApriori`: +-- +-- > Given a set of transactions D, the problem of mining +-- > association rules is to generate all association rules +-- > that have support and confidence greater than the +-- > user-specified minimum support (called minsup) and +-- > minimum confidence (called minconf) respectively. +module Clustering.FrequentItemSet.BruteForce ( + type ItemSet, + Transaction (..), + type Support (), + type FrequentItemSet (), + frequentItemSets, + type AllItems (), + allFrequentItemSets, + type AssociationRule (), + type AssociationConfidence (), + type Association (..), + associationRules, + -- type Clusters, + -- type ClosedFrequentItemSets (), + -- closedFrequentItemSets, + -- allClosedFrequentItemSets, +) where + +import Data.Bool +import Data.Eq (Eq (..)) +import Data.Foldable (foldMap) +import Data.Function ((&)) +import Data.Functor ((<&>)) +import Data.Int (Int) +import Data.List qualified as List +import Data.Monoid (Monoid (..)) +import Data.Ord qualified as Ord +import Data.Ratio ((%)) +import Data.Set (Set) +import Data.Set qualified as Set +import Data.Validity (Validity (..), delve) +import GHC.Generics (Generic) +import Logic +import Logic.Theory.Arithmetic (Zero) +import Logic.Theory.Ord +import Numeric.Probability +import Prelude (fromIntegral, (+)) +import Text.Show (Show (..)) + +type ItemSet = Set + +data Transaction item a = Transaction + { transactionData :: a + , transactionItems :: ItemSet item + } + deriving (Show, Generic) +instance (Validity (ItemSet item), Validity a) => Validity (Transaction item a) where + validate x = + mconcat + [ delve "transactionData" (transactionData x) + , delve "transactionItems" (transactionItems x) + ] + +data Support itemSet db = SupportAxiom +instance Axiom (Support itemSet db ::: Int >= Zero) + +-- | Return the number of occurrences of @(itemSet)@ in @(db)@. +support :: + Ord.Ord item => + items ::: ItemSet item -> + db ::: [Transaction item a] -> + Support itemSet db ::: Int +support (Named items) (Named db) = + SupportAxiom ... List.length (List.filter (items `Set.isSubsetOf`) (db <&> transactionItems)) + +data FrequentItemSet items db minSupp = FrequentItemSetAxiom + +-- | Determine the frequent itemsets. +frequentItemSets :: + Ord.Ord item => + items ::: ItemSet item -> + db ::: [Transaction item a] -> + minSupp ::: Int / minSupp > Zero -> + [FrequentItemSet items db minSupp ::: ItemSet item] +frequentItemSets (Named items) (Named db) (Named minSupp) = + [ FrequentItemSetAxiom ... sub + | (sub, occ) <- occBySubSet (db <&> transactionItems) + , occ Ord.>= minSupp + ] + where + -- TimeEfficiencyWarning: iterate only once through the given `(db)` + -- to count the occurence of each subset of the given `(items)`. + -- occBySubSet :: [Transaction item a] -> [(ItemSet item, Int)] + occBySubSet = + List.foldr + (\t -> List.map (\(sub, occ) -> (sub, if sub `Set.isSubsetOf` t then occ + 1 else occ))) + [(sub, 0) | sub <- Set.powerSet items & Set.toList] + +data AllItems db = AllItemsAxiom + +-- | @ +-- `allFrequentItemSets` db minSupp = `frequentItemSets` is db minSupp +-- @ +-- where `(is)` gathers all the items present in the given `(db)`. +allFrequentItemSets :: + Ord.Ord item => + db ::: [Transaction item a] -> + minSupp ::: Int / minSupp > Zero -> + [ FrequentItemSet (AllItems db) db minSupp + ::: ItemSet item + ] +allFrequentItemSets db = + frequentItemSets (AllItemsAxiom ... foldMap transactionItems (unName db)) db + +-- | An association rule. 
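--
-- A quick numeric illustration (hypothetical data): if the itemset {A,B}
-- occurs in 2 out of 4 transactions and {A} occurs in 3 of them, then the
-- rule A => B has support 2/4 = 50% and confidence 2/3 (about 67%).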
+-- +-- Definition: in `Clustering.FrequentItemSet.References.RefAgrawalSrikantApriori`: +-- +-- > An association rule is an implication of the form +-- > X ⇒ Y, where X ⊂ I, Y ⊂ I, and X ∩ Y = ∅. +-- > +-- > The rule X ⇒ Y holds in the transaction set D with confidence c +-- > if c% of transactions in D that contain X also contain Y. +-- > +-- > The rule X ⇒ Y has support s in the transaction set D +-- > if s% of transactions in D contain X ∪ Y. +data AssociationRule items db minSupp = AssociationRuleAxiom + +data Association items db minSupp minConf item = Association + { associationCause :: FrequentItemSet items db minSupp ::: ItemSet item + -- ^ CorrectnessProperty: `associationCause` is a `FrequentItemSet`. + -- Because `freqSet` is frequent, and `causeSet` ⊂ `freqSet`. + , associationConfidence :: AssociationConfidence items db minSupp minConf ::: Probability + , -- CorrectnessProperty: `associationConfidence` is a `Probability`. + -- Because P(consequenceSet | causeSet) = P(causeSet ∩ consequenceSet) / P(causeSet) + associationConsequence :: FrequentItemSet items db minSupp ::: ItemSet item + -- ^ CorrectnessProperty: `associationConsequence` is a `FrequentItemSet`. + -- Because `freqSet` is frequent, and `consequenceSet` ⊂ `freqSet`. + } + deriving (Show, Eq) + +data AssociationConfidence items db minSupp minConf = AssociationConfidenceAxiom + +-- | By construction in `associationRules`. +instance Axiom (AssociationConfidence items db minSupp minConf >= minConf) + +-- | @ +-- `associationRules` freqSet db minConf +-- @ +-- generates association rules from the given `FrequentItemSet` `(freqSet)` +-- in the given `(db)`, +-- with a 'Confidence' greater or equal to the given `(minConf)`. +-- +-- Algorithm: in `Clustering.FrequentItemSet.References.RefAgrawalSrikantApriori`, +-- section 1.1 "Problem Decomposition and Paper Organization", point 2: +-- +-- For a given `FrequentItemSet` @(freqSet)@, +-- For every @(causeSet)@ non-empty subset of @(freqSet)@. +-- output a rule of the form @(causeSet => (freqSet - causeSet))@ +-- if in the given @(db)@, +-- the ratio @(`support` freqSet)@ +-- over @(`support` causeSet)@ +-- is greater or egal to the given @(minConf)@. +-- +-- CorrectnessNote: +-- > The association rules we consider are probabilistic in nature. +-- > The presence of a rule X → A does not necessarily mean +-- > that X + Y → A also holds because the latter may +-- > not have minimum support. +-- > Similarly, the presence of rules X → Y and Y → Z +-- > does not necessarily mean > that X → Z holds +-- > because the latter may not have minimum confidence. +-- +-- PerformanceTimeWarning: traverses the given `(db)` +-- once for each non-empty subset of the given `(freqSet)`. +associationRules :: + Ord.Ord item => + FrequentItemSet items db minSupp ::: ItemSet item -> + db ::: [Transaction item a] -> + minConf ::: Probability -> + [ AssociationRule items db minSupp + ::: Association items db minSupp minConf item + ] +associationRules freqSet db minConf = + [ AssociationRuleAxiom + ... Association + { associationCause = FrequentItemSetAxiom ... causeSet + , associationConfidence = AssociationConfidenceAxiom ... confidence + , -- CorrectnessProperty: `consequenceSet` is frequent. + -- Because `freqSet` is frequent, and `consequenceSet` ⊂ `freqSet`. + associationConsequence = FrequentItemSetAxiom ... 
consequenceSet + } + | causeSet <- Set.powerSet (unName freqSet) & Set.toList + , not (Set.null causeSet) + , let consequenceSet = unName freqSet `Set.difference` causeSet + , not (Set.null consequenceSet) + , let Named causeOcc = support (() ... causeSet) db + , confidence <- probability (fromIntegral freqSetOcc % fromIntegral causeOcc) + , unName minConf Ord.<= confidence + ] + where + Named freqSetOcc = support freqSet db diff --git a/src/Clustering/FrequentItemSet/LCM.hs b/src/Clustering/FrequentItemSet/LCM.hs new file mode 100644 index 0000000..7000d48 --- /dev/null +++ b/src/Clustering/FrequentItemSet/LCM.hs @@ -0,0 +1,676 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- SPDX-FileCopyrightText: 2010 Alexandre Termier et al. + +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE MagicHash #-} +{-# LANGUAGE UnboxedTuples #-} +{-# OPTIONS_GHC -Wno-unused-imports #-} + +-- | Original LCM algorithm from: +-- +-- \"An efficient algorithm for enumerating closed patterns in transaction databases\". +-- Takeaki Uno, Tatsuya Asai, Yuzo Uchida, and Hiroki Arimura. +-- In Discovery Science, pages 16–31, 2004. +-- https://research.nii.ac.jp/~uno/papers/lcm_ds04.pdf +-- https://www.annas-archive.org/scidb/10.1007/978-3-540-30214-8_2 +-- +-- Code adapted from HLCM: +-- +-- \"HLCM: a first experiment on parallel data mining with Haskell\". +-- Alexandre Termier & Benjamin Négrevergne & Simon Marlow & Satnam Singh. +-- https://lig-membres.imag.fr/termier/HLCM/hlcm.pdf +-- https://hackage.haskell.org/package/hlcm +-- +-- Possible future work: +-- +-- \"Discovering closed frequent itemsets on multicore: +-- parallelizing computations and optimizing memory accesses\". +-- Benjamin Négrevergne & Alexandre Termier & Jean-François Méhaut & Takeaki Uno. +-- https://people.irisa.fr/Alexandre.Termier/publis/2010_negrevergne_hppddm.pdf +module Clustering.FrequentItemSet.LCM ( + type ItemSet, + Transaction (..), + type FrequentItemSets (), + frequentItemSets, + type AllItems (), + allFrequentItemSets, + type Clusters, + type ClosedFrequentItemSets (), + closedFrequentItemSets, + allClosedFrequentItemSets, + itemToSupport, + runLCM, + type ItemSupport, +) +where + +import Clustering.FrequentItemSet.BruteForce (ItemSet, Support (), Transaction (..)) +import Data.Bifunctor (second) +import Data.Bool +import Data.Eq (Eq (..)) +import Data.Foldable (fold, foldMap) +import Data.Function (id, ($), (&), (.)) +import Data.Functor ((<$>), (<&>)) +import Data.Int (Int) +import Data.Monoid (Monoid (..)) +import Data.Ord (Down, Ord (..), comparing) +import Data.Ratio ((%)) +import Data.Semigroup (Semigroup (..)) +import Data.Sequence qualified as Seq +import Data.Tuple (fst, snd) +import Data.Validity (Validity (..), delve) +import Debug.Pretty.Simple (pTraceShow) +import Debug.Trace +import GHC.Generics (Generic) +import GHC.IsList (toList) +import GHC.Stack (HasCallStack) +import Logic +import Logic.Theory.Arithmetic (Zero) +import Logic.Theory.Ord qualified as Logic.Ord +import Numeric.Probability +import Text.Show (Show (..)) +import Prelude (Enum, Num, error, fromIntegral, (+), (-)) + +import Control.Exception (evaluate) +import Control.Monad +import Control.Monad.ST +import Control.Parallel +import Control.Parallel.Strategies +import Data.Array.Base +import Data.Array.ST +import Data.Array.Unboxed +import Data.ByteString.Char8 qualified as L +import Data.List +import Data.List qualified as List +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +import Data.Maybe (catMaybes, fromJust, isNothing, 
maybeToList) +import Data.Set (Set) +import Data.Set qualified as Set +import Data.Vector qualified as V +import GHC.Exts hiding (Item) +import GHC.ST + +data FrequentItemSets items db minSupp minSize = FrequentItemSetsAxiom + +-- | +-- @ +-- `frequentItemSets` minSupp minSize items db +-- @ +-- returns a list of the closed frequent itemsets of +-- the transactions @(db)@ restricted to the specified @(items)@, +-- and such that the number of transactions containing them is greater or equal to @(minSupp)@, +-- and such that the size of those transactions is greater or equal to @(minSize)@. +-- Each closed frequent itemset is coupled with the sequence of the transactions containing them. +frequentItemSets :: + forall a item db minSize minSupp items. + Ord item => + Show item => + Show a => + minSupp ::: Int / minSupp Logic.Ord.> Zero -> + minSize ::: Int / minSize Logic.Ord.> Zero -> + items ::: ItemSet item -> + db ::: [Transaction item a] -> + FrequentItemSets items db minSupp minSize + ::: Clusters item a +frequentItemSets (Named minSupp) (Named minSize) (Named items) (Named db) = + FrequentItemSetsAxiom ... + Map.fromDistinctAscList (loop Set.empty items db) + where + -- Lexicographic depth-first search for frequent item sets. + loop previousFIS nextItems previousTxns + | Set.null nextItems = [] + | otherwise = + -- pTraceShow (["LCM", "frequentItemSets", "loop"], (("previousFIS", previousFIS), ("nextItems", nextItems), ("previousTxns", previousTxns))) $ + -- Map each item of `nextItems` to its transactions in `previousTxns`. + let nextItemToTxns = itemToTxns nextItems previousTxns + in (`List.concatMap` Map.toList nextItemToTxns) \(nextItem, nextTxns) -> + -- Keep only the itemsets which are supported by enough transactions. + if minSupp <= fromIntegral (Seq.length nextTxns) + then + let nextFIS = Set.insert nextItem previousFIS + in -- Keep only the itemsets which have already gathered enough items. + [ (nextFIS, nextTxns) + | minSize <= fromIntegral (Set.size nextFIS) + ] + <> + -- Recurse with each item of `nextItems` greater than `nextItem` + -- (to traverse the frequent item sets as a tree instead of a poset lattice), + -- and with the transactions containing `nextItem`. + loop nextFIS (Set.split nextItem nextItems & snd) (nextTxns & toList) + else [] + +-- | @ +-- `itemToTxns` items db +-- @ +-- maps each item of `items` to the transactions of `db` containing it. +-- +-- This maps from an "horizontal" representation to a "vertical" one, +-- itself mapping to an "horizontal" one. +-- See p.8 of https://www.lirmm.fr/~lazaar/imagina/NL-DM-IMAGINA1819-part1.pdf +-- +-- It's a kind of occurrence deliver. +-- p.4 of http://osdm.uantwerpen.be/papers/p77-uno.pdf +itemToTxns :: + Ord item => + ItemSet item -> + [Transaction item a] -> + Map item (Seq.Seq (Transaction item a)) +itemToTxns itms txs = + Map.fromListWith + (<>) + [ (itm, Seq.singleton tx) + | tx <- txs + , itm <- Set.intersection itms (transactionItems tx) & Set.toList + ] + +data AllItems db = AllItemsAxiom + +-- | `frequentItemSets` applied to all the items of the given transactions. +allFrequentItemSets :: + Ord item => + Show item => + Show a => + minSupp ::: Int / minSupp Logic.Ord.> Zero -> + minSize ::: Int / minSize Logic.Ord.> Zero -> + db ::: [Transaction item a] -> + FrequentItemSets (AllItems db) db minSupp minSize + ::: Clusters item a +allFrequentItemSets minSupp minSize db = + frequentItemSets + minSupp + minSize + (AllItemsAxiom ... 
foldMap transactionItems (unName db)) + db + +data ClosedFrequentItemSets items db minSupp minSize = ClosedFrequentItemSetsAxiom + +type Clusters item a = + Map + (ItemSet item) + (Seq.Seq (Transaction item a)) + +closedFrequentItemSets :: + forall item db minSize minSupp items. + HasCallStack => + Ord item => + Show item => + minSupp ::: Int / minSupp Logic.Ord.> Zero -> + minSize ::: Int / minSize Logic.Ord.> Zero -> + items ::: ItemSet item -> + db ::: [Set item] -> + ClosedFrequentItemSets items db minSupp minSize + ::: [(ItemSupport, ItemSet item)] +closedFrequentItemSets (Named minSupp) (Named minSize) (Named items) (Named db) = + ClosedFrequentItemSetsAxiom ... + runLCM items minSupp minSize db + +allClosedFrequentItemSets :: + HasCallStack => + Ord item => + Show item => + minSupp ::: Int / minSupp Logic.Ord.> Zero -> + minSize ::: Int / minSize Logic.Ord.> Zero -> + db ::: [Set item] -> + ClosedFrequentItemSets (AllItems db) db minSupp minSize + ::: [(ItemSupport, ItemSet item)] +allClosedFrequentItemSets minSupp minSize db = + closedFrequentItemSets + minSupp + minSize + (AllItemsAxiom ... fold (unName db)) + db + +type ItemSupport = Int +type ItemRank = Int + +runLCM :: + forall item. + Show item => + HasCallStack => + Ord item => + Set item -> + ItemSupport -> + Int -> + [Set item] -> + [(ItemSupport, Set item)] +runLCM items minSupp minSize db = + let + itemToSupp :: [(item, ItemSupport)] + itemToSupp = + itemToSupport items db + & Map.toList + & List.filter ((>= minSupp) . snd) + & List.sortBy (comparing (Down . snd)) + + itemsSize :: Int + itemsSize = List.length itemToSupp + + itemToRank :: Map item ItemRank + itemToRank = + Map.fromList + [ (i, List.head $ List.findIndices ((== i) . fst) itemToSupp) + | (i, _) <- itemToSupp + ] + + -- Rewrites the database to use `ItemRank` instead of `item` + rankDB :: [Set ItemRank] + rankDB = + [ Set.fromList + [ rank + | i <- tx & Set.toList + -- Items whose support is lower than `minSupp` + -- have been filtered-out in `itemToSupp`, + -- hence do not have a rank. + , rank <- Map.lookup i itemToRank & maybeToList + ] + | tx <- db + ] + + -- Rewrite the database as a `LexicoTreeItem` + dbLT = List.foldr (\tx acc -> insertLT (tx & Set.toList) (-1) 1 acc) Nil rankDB + + rankToItem :: Array ItemRank item + rankToItem = + List.zip [0 ..] (fst <$> itemToSupp) + & array (0, itemsSize - 1) + + unrank :: [(ItemSupport, Set ItemRank)] -> [(ItemSupport, Set item)] + unrank = List.map $ second $ Set.map (rankToItem `unsafeAt`) + in + [ lcmLoop minSupp minSize 1 dbLT Set.empty candidateRank (rankToSuppLT items dbLT) items + | candidateRank <- [0 .. Set.size items -1] + ] + & parBuffer 8 rdeepseq + & runEval + & List.concat + & unrank + +-- | +-- For a transaction database, a closed frequent Itemset, and a candidate item +-- for extension of this closed frequent Itemset, recursively computes all +-- the successor closed frequent Itemsets by PPC-extension. +lcmLoop :: + Show item => + ItemSupport -> + Int -> + -- | Current depth in the search tree (for parallel optimisation purposes) + Int -> + -- | Transaction database. + LexicoTreeItem -> + -- | Input closed frequent Itemset. + Set ItemRank -> + -- | Candidate to extend the closed frequent Itemset above. + ItemRank -> + -- | Array associating each item with its frequency + UArray ItemRank ItemSupport -> + -- | Maximal item + Set item -> + -- | Result : list of closed frequent Itemsets. Each result is a list of Items, the head of the list being the frequency of the item. 
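--
-- For intuition (hypothetical data, no size constraint): with transactions
-- {a,b,c}, {a,b} and {b,c} and a minimum support of 2, the closed frequent
-- item sets are {b} (support 3), {a,b} (support 2) and {b,c} (support 2);
-- {a} alone is not closed, because every transaction containing a also
-- contains b.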
+ [(ItemSupport, Set ItemRank)] +lcmLoop minSupp minSize depth previousDB previousRanks candidateRank rankToSupp items = + let + -- HLCM: line 1: CDB = project and reduce DB w.r.t. P and limit + -- Reduce database by eliminating: + -- - all items greater than `candidateRank`, + -- - and all items with zero support. + reducedDB = projectAndReduce candidateRank rankToSupp previousDB + + -- HLCM: line 2: Compute frequencies of items in CDB + -- Compute items occurrences in reduced database. + reducedRankToSupp = rankToSuppLT items reducedDB + + -- HLCM: line 3: CP = 100% frequent items in CDB + -- Check which items actually appear in reduced database. + candidateSupp = rankToSupp ! candidateRank + + -- HLCM: line 6: Candidates = frequent items of CDB that are not in CP + -- Compute 100% frequent items, future candidates, and unfrequent items. + (closedFreqRanks, candidateRanks, unfreqRanks) = + computeCandidates minSupp candidateSupp items reducedRankToSupp + in + --pTraceShow (["lcmLoop"], minSupp, minSize, depth, previousDB, previousRanks, candidateRank, rankToSupp, items) $ + -- HLCM: line 4: if max(CP) = limit then + if not (List.null closedFreqRanks) -- if there is a result ... + && last closedFreqRanks <= candidateRank -- ...and if it is OK to extend it + then + let + -- HLCM: line 5: P' = P ∪ CP + -- Result closed frequent Itemset = input closed frequent Itemset + 100% frequent Items + closedItemset = previousRanks <> Set.fromList closedFreqRanks + + -- HLCM: line 8: for all e ∈ Candidates, e ≤ limit do + -- Only candidates with value lower than input candidateRank + -- can be used for further extension on this branch. + smallCandidates = List.takeWhile (< candidateRank) candidateRanks + in + [ (candidateSupp, closedItemset) + | minSize <= fromIntegral (Set.size closedItemset) + ] + <> if not (List.null smallCandidates) -- ... and if we have at least 1 possible extension + then + let + -- Update items occurrences table by suppressing: + -- - 100% frequent items, + -- - and unfrequent items. + newRankToSupp = suppressItems reducedRankToSupp closedFreqRanks unfreqRanks + + loop newCandidate = lcmLoop minSupp minSize (depth + 1) reducedDB closedItemset newCandidate newRankToSupp items + in + -- Recursively extend the candidates + if 3 < depth -- create parallel sparks only for low search space depth + then List.concat $ runEval $ parBuffer 2 rdeepseq $ List.map loop smallCandidates + else List.concatMap loop smallCandidates + else [] + else [] + +-- | +-- For a transaction database of type [[item]], compute the frequency +-- of each item and return an array (item, frequency). +itemToSupport :: + Ord item => + Set item -> + [Set item] -> + Map item ItemSupport +itemToSupport items db = + Map.fromListWith + (+) + [ (itm, 1) + | tx <- db + , itm <- Set.intersection items tx & Set.toList + ] + +-- | +-- For a given itemset being extended by a given candidate, compute : +-- - the closure of this itemset +-- - and the candidates for further extension. +computeCandidates :: + ItemSupport -> + ItemSupport -> + Set item -> + UArray ItemRank ItemSupport -> + -- (100% frequent items == closure, candidates for further extension, unfrequent items) + ([ItemRank], [ItemRank], [ItemRank]) +computeCandidates minSupp candidateSupp items rankToSupp = + let + (frequentItems, unfreqItems) = + List.partition + (\i -> rankToSupp ! i >= minSupp) + [i | i <- [0 .. Set.size items - 1], rankToSupp ! i > 0] + (closedFrequentRanks, candidateRanks) = + List.partition (\i -> rankToSupp ! 
i == candidateSupp) frequentItems + in + (closedFrequentRanks, candidateRanks, unfreqItems) + +-- | +-- Modifies an array associating Items with their frequency, in order to +-- give a frequency of 0 to a given list of Items. +-- +-- NB : for performance reasons, this is REALLY a modification, made with unsafe operations. +suppressItems :: + -- | Array associating an item with its frequency + UArray ItemRank ItemSupport -> + -- | List of 100% frequent Items + [ItemRank] -> + -- | List of unfrequent Items + [ItemRank] -> + -- | Initial array, with frequencies of 100% frequent Items + -- and unfrequent Items set to 0. + UArray ItemRank ItemSupport +suppressItems rankToSupp closedRanks unfreqRanks = + runST do + -- Can be used in multithread because no concurrent write + arr <- unsafeThaw rankToSupp :: ST s (STUArray s ItemRank ItemSupport) + forM_ closedRanks \i -> writeArray arr i 0 + forM_ unfreqRanks \i -> writeArray arr i 0 + -- Can be used in multithread because no concurrent write + unsafeFreeze arr + +----------------------------------------------------------------- +-- LEXICOGRAPHIC TREE MANIPULATION +----------------------------------------------------------------- + +-- | +-- Creates a new, reduced transaction database by eliminating all items +-- greater than @candidateRank@ item, and all infrequent Items. +projectAndReduce :: + -- | Candidate item, on which the projection is made + ItemRank -> + -- | Array associating each item with its frequency in + -- original transaction database. + UArray ItemRank ItemSupport -> + -- | Original transaction database + LexicoTreeItem -> + -- | Result : Reduced transaction database + LexicoTreeItem +projectAndReduce !candidateRank rankToSupp = go + where + go Nil = Nil + go (Node e suiv alt w) + | e > candidateRank = Nil + | e == candidateRank = + let !(suiv', addWeight) = filterInfrequent suiv rankToSupp + in Node e suiv' Nil (w + addWeight) + | otherwise = + let + !alt' = go alt + !suiv' = go suiv + in + if rankToSupp ! e > 0 + then + if notNil suiv' && notNil alt' + then Node e suiv' alt' 0 + else if notNil suiv' then Node e suiv' Nil 0 else alt' + else + if notNil suiv' && notNil alt' + then mergeAlts suiv' alt' + else if notNil suiv' then suiv' else alt' + +type Weight = Int + +-- | +-- Suppress all infrequent Items from a transaction database expressed as +-- lexicographic tree, and returns a new lexicographic tree. +filterInfrequent :: + -- | Original transaction database + LexicoTreeItem -> + -- | Array associating each item with its frequency in + -- original transaction database. In this setting, + -- an infrequent item as a frequency of 0 (because of preprocessing by + -- ' suppressItems '). + UArray ItemRank ItemSupport -> + -- | Result : (transaction database without infrequent Items, weight to report in parent nodes) + (LexicoTreeItem, Weight) +filterInfrequent Nil _ = (Nil, 0) +filterInfrequent (Node e suiv alt w) occs + | occs ! e > 0 = (Node e suiv' alt' (w + ws), wa) + | notNil suiv' && notNil alt' = (mergeAlts suiv' alt', w') + | notNil alt' = (alt', w') + | notNil suiv' = (suiv', w') + | otherwise = (Nil, w') + where + w' = w + ws + wa + !(suiv', ws) = filterInfrequent suiv occs + !(alt', wa) = filterInfrequent alt occs + +{-# INLINE notNil #-} +notNil :: LexicoTreeItem -> Bool +notNil Nil = False +notNil _ = True + +-- | +-- Occurence delivering: +-- Map each item of the given database to its support. 
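--
-- Over the lexicographic tree this is expected to agree with the simpler,
-- list-based `itemToSupport` above, e.g. (hypothetical values):
-- >>> itemToSupport (Set.fromList "ab") [Set.fromList "ab", Set.fromList "b"]
-- fromList [('a',1),('b',2)]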
+rankToSuppLT :: + Set item -> + -- | Transaction database (in lexicographic tree format) + LexicoTreeItem -> + -- | Result : array associating each item to its frequency. + UArray ItemRank ItemSupport +rankToSuppLT items dbLT = + runST do + arr <- newArray_ (0, Set.size items - 1) + -- TODO: this workaround should no longer be necessary + -- Creates an empty array : each item starts with frequency 0 + -- workaround for http://hackage.haskell.org/trac/ghc/ticket/3586 + forM_ [0 .. Set.size items - 1] $ \i -> unsafeWrite arr i 0 + -- Compute frequencies for each item by efficient tree traversal + _ <- traverseLT dbLT arr + unsafeFreeze arr + +-- | +-- Efficient traversal of the transaction database as a lexicographic tree. +-- Items frequencies are updated on the fly. +traverseLT :: + forall s. + -- | Transaction database + LexicoTreeItem -> + -- | Array associating each item with its frequency. UPDATED by this function ! + STUArray s ItemRank ItemSupport -> + ST s () +traverseLT tree arr = ST $ \s -> + case go tree s of + (# s', _ #) -> (# s', () #) + where + go :: + LexicoTreeItem -> + State# s -> + (# State# s, Int# #) + go Nil s = (# s, 0# #) + go (Node item child alt w@(I# w#)) s0 = + case go child s0 of + (# s1, childw #) -> + case go alt s1 of + (# s2, altw #) -> + case unsafeRead arr item of + ST f -> + case f s2 of + (# _s3, I# itemw #) -> + case unsafeWrite arr item (I# itemw + I# childw + w) of + ST f' -> + case f' s2 of + (# s4, _ #) -> (# s4, childw +# w# +# altw #) + +-- RankToSupp + +-- | Type for a lexicographic tree, implementating a n-ary tree over a binary tree. +data LexicoTreeItem + = -- | Void node + Nil + | -- | A node : item, next node (next in transaction), alternative node (other branch), weight + Node + {-# UNPACK #-} !ItemRank + !LexicoTreeItem -- NB. experimental strictness annotation + !LexicoTreeItem -- NB. experimental strictness annotation + {-# UNPACK #-} !Int + deriving (Eq, Show) + +-- | +-- Inserts a transaction in list format into the lexicographic tree. +-- Automatically merges identical transactions. +-- Performs suffix intersection. +insertLT :: + -- | Transaction to insert into lexicographic tree + [ItemRank] -> + -- | "coreI" item, for suffix intersection. + ItemRank -> + -- | Weight of the transaction to insert + ItemSupport -> + -- | Input lexicographic tree + LexicoTreeItem -> + -- | Result : a new lexicographic tree with the transaction inserted + LexicoTreeItem +insertLT [] _ _ lt = lt +insertLT lst _ w Nil = createPath lst w +insertLT [x] i w (Node e suiv alt weight) + | x < e = Node x Nil (Node e suiv alt weight) w + | x == e = Node e suiv alt (weight + w) + | x > e = Node e suiv (insertLT [x] i w alt) weight +insertLT (x : xs) i w (Node e suiv alt weight) + | x < e = Node x (createPath xs w) (Node e suiv alt weight) 0 + | x == e = + if (e /= i) + then Node e (insertLT xs i w suiv) alt weight + else suffixIntersectionLT xs w (Node e suiv alt weight) + | x > e = Node e suiv (insertLT (x : xs) i w alt) weight +insertLT _ _ _ _ = error "insertLT" + +-- | +-- From a transaction and its weight, directly creates a path-shaped lexicographic tree. 
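--
-- For example (hypothetical values), only the last node of the path carries
-- the transaction weight:
-- >>> createPath [1,3] 2
-- Node 1 (Node 3 Nil Nil 2) Nil 0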
+createPath :: + -- | Transaction + [ItemRank] -> + -- | Weight of the transaction + Int -> + -- | Result : a path-shaped lexicographic tree encoding the transaction + LexicoTreeItem +createPath [] _ = Nil +createPath [x] w = Node x Nil Nil w +createPath (x : xs) w = Node x (createPath xs w) Nil 0 + +-- | +-- Perform the "suffix intersection" operation with the suffix of a transaction +-- and the corresponding part of a lexicographic tree. +-- +-- For more details, see "prefixIntersection" in Takeaki Uno's papers about LCM. +suffixIntersectionLT :: + -- | Suffix of the transaction to insert. + [ItemRank] -> + -- | Weight of the transaction to insert + Int -> + -- | (Sub-)lexicographic tree where the transaction must be inserted. The @next@ part (see data type comments) + -- should be a simple path, it will be the target of intersection with the suffix. + LexicoTreeItem -> + -- | Result : lexicographic tree where the suffix has been added, with correct intersections performed. + LexicoTreeItem +suffixIntersectionLT _ w (Node e Nil alt weight) = Node e Nil alt (weight + w) +suffixIntersectionLT lst w (Node e suiv alt weight) = + let (newSuiv, addWeight) = suffInterSuiv lst w suiv + in Node e newSuiv alt (weight + addWeight) +suffixIntersectionLT _ _ _ = error "suffixIntersectionLT" + +-- | +-- Intersects a list-shaped transaction and a path-shaped lexicographic tree. +-- The result is a path shaped lexicographic tree with weights correctly updated. +suffInterSuiv :: + -- | Transaction as list + [ItemRank] -> + -- | Weight of the above transaction + Int -> + -- | Path-shaped lexicographic tree + LexicoTreeItem -> + -- | Result : (path-shaped lexicographic tree representing the intersection + -- of transaction and input path , 0 if intersection not [] / sum of weights else) + (LexicoTreeItem, Int) +suffInterSuiv lst w suiv = + let + (lstSuiv, weightSuiv) = getLstSuiv suiv + inter = List.intersect lstSuiv lst + in + if (inter /= []) + then (createPath inter (weightSuiv + w), 0) + else (Nil, weightSuiv + w) + +-- | +-- Collects all the nodes of lexicographic tree in a list of elements. +getLstSuiv :: + -- | Path shaped lexicographic tree. + LexicoTreeItem -> + -- | Result : (list of elements in the path, sum of weights of nodes in the path) + ([ItemRank], Int) +getLstSuiv Nil = ([], 0) +getLstSuiv (Node e suiv Nil weight) = + let (lst, w) = getLstSuiv suiv + in (e : lst, w + weight) +getLstSuiv _ = error "getLstSuiv" + +-- | +-- Merge two lexicographic trees. 
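--
-- For example (hypothetical values), merging two single-node trees keeps them
-- ordered along the alternative branch:
-- >>> mergeAlts (Node 1 Nil Nil 2) (Node 3 Nil Nil 1)
-- Node 1 Nil (Node 3 Nil Nil 1) 2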
+mergeAlts :: LexicoTreeItem -> LexicoTreeItem -> LexicoTreeItem +mergeAlts Nil lt = lt +mergeAlts lt Nil = lt +mergeAlts (Node e1 suiv1 alt1 w1) (Node e2 suiv2 alt2 w2) + | e1 < e2 = (Node e1 suiv1 (mergeAlts alt1 (Node e2 suiv2 alt2 w2)) w1) + | e1 > e2 = (Node e2 suiv2 (mergeAlts (Node e1 suiv1 alt1 w1) alt2) w2) + | e1 == e2 = (Node e1 (mergeAlts suiv1 suiv2) (mergeAlts alt1 alt2) (w1 + w2)) +mergeAlts _ _ = error "mergeAlts" diff --git a/src/Clustering/FrequentItemSet/References.hs b/src/Clustering/FrequentItemSet/References.hs new file mode 100644 index 0000000..ffa0c80 --- /dev/null +++ b/src/Clustering/FrequentItemSet/References.hs @@ -0,0 +1,31 @@ +module Clustering.FrequentItemSet.References where + +data RefAgrawalSrikantApriori +-- ^ Reference to [Fast Algorithms for Mining Association Rules](http://www.vldb.org/conf/1994/P487.PDF) +-- +-- > { +-- > "author": [ +-- > { +-- > "family": "Agrawal", +-- > "given": "Rakesh" +-- > }, +-- > { +-- > "family": "Srikant", +-- > "given": "Ramakrishnan" +-- > } +-- > ], +-- > "container-title": "Proc. 20th Int. Conf. Very Large Data Bases VLDB", +-- > "id": "article", +-- > "issued": { +-- > "date-parts": [ +-- > [ +-- > 2000, +-- > 8 +-- > ] +-- > ] +-- > }, +-- > "page": "", +-- > "title": "Fast Algorithms for Mining Association Rules", +-- > "type": "article-journal", +-- > "volume": "1215" +-- > } diff --git a/src/Clustering/UnionFind/ST.hs b/src/Clustering/UnionFind/ST.hs new file mode 100644 index 0000000..3af1e50 --- /dev/null +++ b/src/Clustering/UnionFind/ST.hs @@ -0,0 +1,198 @@ +-- SPDX-License-Identifier: BSD-3-Clause +-- SPDX-FileCopyrightText: 2012 Thomas Schilling +-- +{-# OPTIONS_GHC -funbox-strict-fields #-} + +-- | An implementation of Tarjan's UNION-FIND algorithm. +-- (Robert E Tarjan. \"Efficiency of a Good But Not Linear Set Union Algorithm\", JACM 22(2), 1975) +-- +-- The algorithm implements three operations efficiently (all amortised @O(1)@): +-- +-- 1. Check whether two elements are in the same equivalence class. +-- +-- 2. Create a union of two equivalence classes. +-- +-- 3. Look up the descriptor of the equivalence class. +-- +-- The implementation is based on mutable references. +-- Each equivalence class has exactly one member that serves +-- as its representative element. +-- Every element either is the representative element of its equivalence class +-- or points to another element in the same equivalence class. +-- Equivalence testing thus consists of following the pointers +-- to the representative elements and then comparing these for identity. +-- +-- The algorithm performs lazy path compression. +-- That is, whenever we walk along a path greater than length 1 +-- we automatically update the pointers along the path to directly point +-- to the representative element. +-- Consequently future lookups will be have a path length of at most 1. +-- +-- Adapted from Thomas Schilling's union-find package: +-- https://hackage.haskell.org/package/union-find +module Clustering.UnionFind.ST ( + Point, + fresh, + repr, + union, + unionWith, + equivalent, + redundant, + descriptor, + setDescriptor, + modifyDescriptor, +) +where + +import Control.Applicative +import Control.Monad (Monad (..), when) +import Control.Monad.ST +import Data.Bool (Bool (..)) +import Data.Eq (Eq (..)) +import Data.Function (($)) +import Data.Int (Int) +import Data.Ord (Ord (..)) +import Data.STRef +import Prelude (error, (+)) + +-- | The abstract type of an element of the sets we work on. 
It is +-- parameterised over the type of the descriptor. +newtype Point s a = MkPoint (STRef s (Link s a)) + deriving Eq -- Pointer equality on STRef + +-- TODO: unpack Info +-- as in https://github.com/haskell/cabal/blob/8815e0a3e76e05cac91b8a88ce7d590afb07ef71/Cabal/src/Distribution/Utils/UnionFind.hs +data Link s a + = -- | This is the descriptive element of the equivalence class. + Info {-# UNPACK #-} !(STRef s (Info a)) + | -- | Pointer to some other element of the equivalence class. + Link {-# UNPACK #-} !(Point s a) + deriving (Eq) + +unInfo :: Link s a -> STRef s (Info a) +unInfo = \case + Info x -> x + _ -> error "unInfo" + +data Info a = MkInfo + { weight :: {-# UNPACK #-} !Int + -- ^ The size of the equivalence class, used by 'union'. + , descr :: a + } + deriving (Eq) + +-- | /O(1)/. +-- Create a fresh equivalence class and return it. A fresh point is in +-- the equivalence class that contains only itself. +fresh :: a -> ST s (Point s a) +fresh desc = do + info <- newSTRef (MkInfo{weight = 1, descr = desc}) + l <- newSTRef (Info info) + return (MkPoint l) + +-- | /O(1)/. @repr point@ +-- returns the representative point of @point@'s equivalence class. +-- +-- This method performs the path compresssion. +repr :: Point s a -> ST s (Point s a) +repr point@(MkPoint l) = do + link <- readSTRef l + case link of + Info _ -> return point + Link pt'@(MkPoint l') -> do + pt'' <- repr pt' + when (pt'' /= pt') $ do + -- At this point we know that @pt'@ is not the representative + -- element of @point@'s equivalent class. Therefore @pt'@'s + -- link must be of the form @Link r@. We write this same + -- value into @point@'s link reference and thereby perform + -- path compression. + link' <- readSTRef l' + writeSTRef l link' + return pt'' + +-- | Return the reference to the point's equivalence class's descriptor. +descrRef :: Point s a -> ST s (STRef s (Info a)) +descrRef point@(MkPoint link_ref) = do + link <- readSTRef link_ref + case link of + Info info -> return info + Link (MkPoint link'_ref) -> do + -- Unrolling for the length == 1 case. + link' <- readSTRef link'_ref + case link' of + Info info -> return info + _ -> repr point >>= descrRef + +-- | /O(1)/. Return the descriptor associated with argument point's +-- equivalence class. +descriptor :: Point s a -> ST s a +descriptor point = descr <$> (descrRef point >>= readSTRef) + +-- | /O(1)/. Replace the descriptor of the point's equivalence class +-- with the second argument. +setDescriptor :: Point s a -> a -> ST s () +setDescriptor point new_descr = do + r <- descrRef point + modifySTRef r $ \i -> i{descr = new_descr} + +modifyDescriptor :: Point s a -> (a -> a) -> ST s () +modifyDescriptor point f = do + r <- descrRef point + modifySTRef r $ \i -> i{descr = f (descr i)} + +-- | /O(1)/. Join the equivalence classes of the points (which must be +-- distinct). The resulting equivalence class will get the descriptor +-- of the second argument. +union :: Point s a -> Point s a -> ST s () +union p1 p2 = unionWith p1 p2 (\_ d2 -> return d2) + +-- | Like 'union', but sets the descriptor returned from the callback. +-- +-- The intention is to keep the descriptor of the second argument to +-- the callback, but the callback might adjust the information of the +-- descriptor or perform side effects. +unionWith :: Point s a -> Point s a -> (a -> a -> ST s a) -> ST s () +unionWith p1 p2 update = do + point1@(MkPoint link_ref1) <- repr p1 + point2@(MkPoint link_ref2) <- repr p2 + -- The precondition ensures that we don't create cyclic structures. 
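-- (A usage sketch with hypothetical descriptors: after `union`, both points
-- are equivalent and report the second point's descriptor.)
-- >>> runST (do { a <- fresh "a"; b <- fresh "b"; union a b; descriptor a })
-- "b"
-- >>> runST (do { a <- fresh "a"; b <- fresh "b"; union a b; equivalent a b })
-- True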
+ when (point1 /= point2) $ do + info_ref1 <- unInfo <$> readSTRef link_ref1 + info_ref2 <- unInfo <$> readSTRef link_ref2 + MkInfo w1 d1 <- readSTRef info_ref1 -- d1 is only passed to the update callback + MkInfo w2 d2 <- readSTRef info_ref2 + d2' <- update d1 d2 + -- Make the smaller tree a subtree of the bigger one. + -- The idea is this: We increase the path length of one set by one. + -- Assuming all elements are accessed equally often, + -- this means the penalty is smaller if we do it + -- for the smaller set of the two. + if w1 >= w2 + then do + writeSTRef link_ref2 (Link point1) + writeSTRef info_ref1 (MkInfo (w1 + w2) d2') + else do + writeSTRef link_ref1 (Link point2) + writeSTRef info_ref2 (MkInfo (w1 + w2) d2') + +-- | /O(1)/. Return @True@ if both points belong to the same +-- equivalence class. +equivalent :: Point s a -> Point s a -> ST s Bool +equivalent p1 p2 = (==) <$> repr p1 <*> repr p2 + +-- | /O(1)/. Returns @True@ for all but one element of an equivalence class. +-- That is, if @ps = [p1, .., pn]@ are all in the same +-- equivalence class, then the following assertion holds. +-- +-- > do rs <- mapM redundant ps +-- > assert (length (filter (==False) rs) == 1) +-- +-- It is unspecified for which element the function returns @False@, +-- so be really careful when using this. +redundant :: Point s a -> ST s Bool +redundant (MkPoint link_r) = do + link <- readSTRef link_r + case link of + Info _ -> return False + Link _ -> return True diff --git a/src/Numeric/Probability.hs b/src/Numeric/Probability.hs new file mode 100644 index 0000000..1314d4d --- /dev/null +++ b/src/Numeric/Probability.hs @@ -0,0 +1,113 @@ +-- For `ProbabilityScale` +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE InstanceSigs #-} + +module Numeric.Probability ( + Probability, + ProbabilityScale, + ProbabilityBounded (..), + probability, + safeProbability, + runProbability, + assertProbability, + proba0, + proba1, +) +where + +import Control.Monad (Monad (..)) +import Data.Bool (Bool) +import Data.Eq (Eq(..)) +import Data.Function (id, on, (.)) +import Data.Maybe (Maybe (..), fromJust) +import Data.Monoid (Monoid (..)) +import Data.Ord (Ord (..), Ordering) +import Data.Proxy (Proxy (..)) +import Data.Semigroup (Semigroup (..)) +import Data.Validity (Validity (..), declare) +import Data.Word (Word64) +import GHC.Generics (Generic) +import GHC.Real (RealFrac(..)) +import GHC.Stack (HasCallStack) +import GHC.TypeNats (Natural, natVal) +import Logic +import Logic.Theory.Bool (type (&&)) +import Logic.Theory.Ord (type (<=)) +import Numeric.Decimal (Decimal (..), MonadThrow (..)) +import Numeric.Decimal qualified as Decimal +import System.Random (Random) +import Text.Show (Show (show)) +import Prelude (Bounded (..), Enum, Fractional (..), Integral, Num (..), Rational, Real(..), error, (^)) + +type Probability = Decimal Decimal.RoundHalfEven ProbabilityScale ProbabilityBounded +instance Validity Probability where + validate (Decimal wb) = validate wb + +probability :: MonadThrow m => Rational -> m Probability +probability = Decimal.fromRationalDecimalBoundedWithRounding +{-# INLINE probability #-} + +proba0 :: Probability +proba1 :: Probability +proba0 = fromJust (probability 0) +proba1 = fromJust (probability 1) + +safeProbability :: r ::: Rational / 0 <= r && r <= 1 -> Probability +safeProbability (Named r) = fromJust (probability r) + +runProbability :: Probability -> Rational +runProbability = Decimal.toRationalDecimal +{-# INLINE runProbability #-} + +assertProbability :: HasCallStack => Rational -> Probability +assertProbability r =
case probability r of + Just p -> p + Nothing -> error ("assertProbability: " <> show r) + +instance Num (Decimal.Arith Probability) where + (+) = Decimal.bindM2 Decimal.plusDecimalBounded + (-) = Decimal.bindM2 Decimal.minusDecimalBounded + (*) = Decimal.bindM2 Decimal.timesDecimalBoundedWithRounding + abs = id + signum m = m >>= Decimal.signumDecimalBounded + fromInteger = Decimal.fromIntegerDecimalBoundedIntegral + +instance Fractional (Decimal.Arith Probability) where + (/) = Decimal.bindM2 Decimal.divideDecimalBoundedWithRounding + fromRational = probability + +instance Eq (Decimal.Arith Probability) where + --(==) :: HasCallStack => Decimal.Arith Probability -> Decimal.Arith Probability -> Bool + (==) = (==) `on` Decimal.arithError + +instance Ord (Decimal.Arith Probability) where + --compare :: HasCallStack => Decimal.Arith Probability -> Decimal.Arith Probability -> Ordering + compare = compare `on` Decimal.arithError + +instance Real (Decimal.Arith Probability) where + toRational = Decimal.toRationalDecimal . Decimal.arithError + +instance RealFrac (Decimal.Arith Probability) where + properFraction p = (n, return (assertProbability f)) + where + (n,f) = properFraction (Decimal.toRationalDecimal (Decimal.arithError p)) + +-- >>> 10^19 <= (fromIntegral (maxBound :: Word64) :: Integer) +-- True +-- >>> 10^20 <= (fromIntegral (maxBound :: Word64) :: Integer) +-- False +type ProbabilityScale = 19 + +newtype ProbabilityBounded = ProbabilityBounded {unProbabilityBounded :: Word64} + deriving (Show, Eq, Ord, Num, Real, Integral, Enum, Random, Generic) +instance Bounded ProbabilityBounded where + minBound = ProbabilityBounded 0 + maxBound = ProbabilityBounded (10 ^ (natVal (Proxy @ProbabilityScale))) +instance Validity ProbabilityBounded where + validate (ProbabilityBounded w) = + mconcat + [ declare ("The contained word is smaller or equal to 10 ^ ProbabilityScale = " <> show (10 ^ n :: Natural)) (w <= 10 ^ n) + ] + where + n :: Natural + n = natVal (Proxy @ProbabilityScale) diff --git a/src/Phylomemy.hs b/src/Phylomemy.hs new file mode 100644 index 0000000..115496a --- /dev/null +++ b/src/Phylomemy.hs @@ -0,0 +1,13 @@ +module Phylomemy ( + module Phylomemy.Indexation, + module Phylomemy.Similarity, + module Phylomemy.TemporalMatching, + module Phylomemy.DOT, + module Numeric.Probability, +) where + +import Phylomemy.Indexation +import Phylomemy.Similarity +import Phylomemy.TemporalMatching +import Phylomemy.DOT +import Numeric.Probability diff --git a/src/Phylomemy/DOT.hs b/src/Phylomemy/DOT.hs new file mode 100644 index 0000000..8f03ab6 --- /dev/null +++ b/src/Phylomemy/DOT.hs @@ -0,0 +1,222 @@ +{-# LANGUAGE OverloadedStrings #-} + +module Phylomemy.DOT where + +-- import Debug.Pretty.Simple (pTraceShow, pTraceShowM) +import Control.Applicative (Applicative (..)) +import Control.Monad (Monad (..), foldM_, forM_, mapM_, when, zipWithM_) +import Control.Monad.Trans.Class qualified as MT +import Control.Monad.Trans.Reader qualified as MT +import Control.Monad.Trans.Writer.CPS qualified as MT +import Data.Bool (otherwise) +import Data.ByteString.Builder qualified as BS +import Data.ByteString.Short qualified as BSh +import Data.Eq (Eq (..)) +import Data.Foldable (foldMap', toList) +import Data.Function (on, ($), (&), (.)) +import Data.Functor ((<&>)) +import Data.Int (Int) +import Data.List qualified as List +import Data.Map.Strict qualified as Map +import Data.Maybe (Maybe (..), maybe) +import Data.Monoid (Monoid (..)) +import Data.Ord (Ord (..)) +import Data.Semigroup (Semigroup (..))
+import Data.Set (Set) +import Data.Set qualified as Set +import Data.String (String) +import Data.Text.Short qualified as TS +import Data.Tree qualified as Tree +import Data.Tuple (snd) +import GHC.Real (floor) +import Numeric (showFFloat) +import Numeric.Probability +import Text.Printf (printf) +import Text.Show (Show (..)) +import Prelude + +import Phylomemy.Indexation +import Phylomemy.TemporalMatching + +-- | @(`dotMaximalSpanningForest` msf)@ +-- returns a graph of the given `MaximalSpanningForest` +-- in [DOT](https://graphviz.org/doc/info/lang.html) format. +dotMaximalSpanningForest :: + forall range cluster. + Show range => + Show cluster => + Ord range => + Ord cluster => + ShowHuman range => + ShowHuman cluster => + MaximalSpanningForest range cluster -> + BS.Builder +dotMaximalSpanningForest msf = runDOT do + let sortedMSF = msf & List.sortBy (compare `on` mstNodeRangeCluster . Tree.rootLabel) + let rangeToMSTToClusters :: range :-> {-mstI-} Int :-> (Maybe Similarity, Set cluster) = + let merge = Map.unionWith \(minSimil, x) (_minSimil, y) -> (minSimil, Set.union x y) + in Map.unionsWith + merge + [ Map.fromListWith merge $ + case mst of + Tree.Node MSTNode{mstNodeRangeCluster = (rootR, rootC)} ts -> + (rootR, Map.singleton mstI (minSimil, Set.singleton rootC)) + : [ (range, Map.singleton mstI (minSimil, Set.singleton cluster)) + | MSTNode{mstNodeRangeCluster = (range, cluster)} <- ts & List.concatMap toList + ] + | (mstI, mst) <- sortedMSF & List.zip [1 :: Int ..] + , let minSimil = mstMinimalSimilarity mst + ] + let showSimilarity (s :: Similarity) = showFFloat (Just 2) (s & runProbability & fromRational @Double) "" + dotComments [(BS.stringUtf8 $ show $ rangeToMSTToClusters & Map.map Map.keys)] + -- pTraceShow ("num of nodes", Map.size nodeToBranch, "num of branches", Map.size msf) $ + dotLine "digraph g" + dotBlock do + dotLine "splines=\"ortho\"" + indexFrom1M_ (rangeToMSTToClusters & Map.toList) \(srcR, mstToClusters) srcRI -> do + let srcRB = "r" <> BS.intDec srcRI + dotLine $ "subgraph cluster_" <> srcRB + dotBlock do + dotComments ["Create a node for the range " <> srcRB] + dotNode + srcRB + [ ("shape", "box") + , ("label", builderQuotedString (showHuman srcR)) + , ("color", "gray") + , ("style", "filled") + , ("fillcolor", "gray") + ] + dotLine "color=gray" + dotBlock do + dotLine "rank=same" + dotComments ["Create the cluster nodes within the range " <> srcRB] + forM_ (mstToClusters & Map.toList) \(mstI, (minSimil, clusters)) -> do + indexFrom1M_ (clusters & toList) \srcC srcCI -> do + dotNodeCluster + srcRI + mstI + srcCI + [ ("label", builderQuotedString $ showHuman srcC <> "\nT" <> printf "%03d" mstI <> maybe "" (("\n" <>) . showSimilarity) minSimil) + , ("style", "filled") + , minSimil & maybe ("", "") (\s -> ("fillcolor", 1 + ((floor (runProbability s * 10)) `mod` 10) & BS.intDec)) + , ("colorscheme", "ylorrd9") + , ("shape", "box") + ] + dotComments ["Horizontally align the cluster nodes within the same range"] + let row = + [ (mstI, clusterI) + | (mstI, (_minSimil, clusters)) <- mstToClusters & Map.toList + , clusterI <- [1 .. 
Set.size clusters] + ] + case row of + [] -> return () + c@(firstTI, firstCI) : cs -> do + dotEdges + [srcRB, srcRB <> "t" <> BS.intDec firstTI <> "c" <> BS.intDec firstCI] + [ ("style", "invis") + ] + cs & (`foldM_` c) \(srcTI, srcCI) dst@(dstTI, dstCI) -> do + dotEdgesCluster + [(srcRI, srcTI, srcCI), (srcRI, dstTI, dstCI)] + [ ("weight", "10") + , ("style", "invis") + ] + return dst + indexFrom1M_ sortedMSF \mst mstI -> do + dotComments ["Create the edges of the MST " <> BS.intDec mstI] + -- pTraceShowM (mstI, List.length (Tree.flatten mst)) + let loop (Tree.Node MSTNode{mstNodeRangeCluster = src} dsts) = do + forM_ dsts \dstNode@(Tree.Node MSTNode{mstNodeRangeCluster = dst, mstNodeSimilarity = simil} _) -> do + -- let similB = BS.stringUtf8 $ showFFloat (Just 2) (simil & runProbability & fromRational @Double) "" + let indexRangeCluster (r, c) = + ( 1 + Map.findIndex r rangeToMSTToClusters + , mstI + , 1 + Set.findIndex c (rangeToMSTToClusters Map.! r Map.! mstI & snd) + ) + dotEdgesCluster + [ indexRangeCluster src + , indexRangeCluster dst + ] + [ ("constraint", "false") + , ("color", (floor (runProbability simil * 10)) `mod` 10 & BS.intDec) + , ("colorscheme", "ylorrd9") + , -- , ("label", similB) + ("fontcolor", "blue") + , ("dir", "both") + , ("arrowhead", "dot") + , ("arrowtail", "dot") + ] + loop dstNode + loop mst + dotRanges rangeToMSTToClusters + +dotRanges :: range :-> a -> DOT +dotRanges rangeTo = do + dotComments ["Vertically align range nodes"] + let rangeLinks = + [ "r" <> BS.intDec srcRI + | srcRI <- [1 .. Map.size rangeTo] + ] + when (1 < List.length rangeLinks) do + dotEdges rangeLinks [("weight", "10"), ("style", "invis")] + +dotNodeCluster :: Int -> Int -> Int -> [(BS.Builder, BS.Builder)] -> DOT +dotNodeCluster r t c = dotNode ("r" <> BS.intDec r <> "t" <> BS.intDec t <> "c" <> BS.intDec c) + +dotEdgesCluster :: [(Int, Int, Int)] -> [(BS.Builder, BS.Builder)] -> DOT +dotEdgesCluster rtc = + dotEdges + [ ("r" <> BS.intDec r <> "t" <> BS.intDec t <> "c" <> BS.intDec c) + | (r, t, c) <- rtc + ] + +-- Alternative to `Show` to get a more human-readable `String`. +class ShowHuman a where + showHuman :: a -> String +instance ShowHuman (Set.Set Root) where + showHuman a = + mconcat (List.intersperse " & " (as <&> TS.unpack)) + where + as = a & Set.toList <&> unNgram . rootLabel +instance ShowHuman Int where + showHuman = show + +type DOT = MT.ReaderT BSh.ShortByteString (MT.Writer BS.Builder) () + +indexFrom1M_ :: Applicative m => [a] -> (a -> Int -> m b) -> m () +indexFrom1M_ xs f = zipWithM_ f xs [1 :: Int ..] + +runDOT :: DOT -> BS.Builder +runDOT = MT.execWriter . 
(`MT.runReaderT` {-indent-} "") + +dotBlock :: DOT -> DOT +dotBlock s = do + dotLine "{" + () <- MT.withReaderT (" " <>) s + dotLine "}" + +dotLine :: BS.Builder -> DOT +dotLine s = do + indent <- MT.ask + MT.lift $ MT.tell $ BS.shortByteString indent <> s <> "\n" + +dotComments :: [BS.Builder] -> DOT +dotComments = mapM_ \c -> dotLine $ "// " <> c + +dotEdges :: [BS.Builder] -> [(BS.Builder, BS.Builder)] -> DOT +dotEdges names as = dotLine $ mconcat (List.intersperse " -> " names) <> builderAttrs as + +dotNode :: BS.Builder -> [(BS.Builder, BS.Builder)] -> DOT +dotNode name as = dotLine $ name <> builderAttrs as + +builderAttrs :: [(BS.Builder, BS.Builder)] -> BS.Builder +builderAttrs as + | List.null as = "" + | otherwise = "[" <> mconcat (List.intersperse "," [k <> "=" <> v | (k, v) <- as, BS.toLazyByteString k /= ""]) <> "]" + +builderQuotedString :: String -> BS.Builder +builderQuotedString cs = BS.charUtf8 '"' <> foldMap' escape cs <> BS.charUtf8 '"' + where + escape '\\' = BS.charUtf8 '\\' <> BS.charUtf8 '\\' + escape '\"' = BS.charUtf8 '\\' <> BS.charUtf8 '\"' + escape c = BS.charUtf8 c diff --git a/src/Phylomemy/Indexation.hs b/src/Phylomemy/Indexation.hs new file mode 100644 index 0000000..6880019 --- /dev/null +++ b/src/Phylomemy/Indexation.hs @@ -0,0 +1,157 @@ +{-# OPTIONS_GHC -Wno-orphans #-} + +module Phylomemy.Indexation +where + +-- TODO: ( … ) + +import Data.Eq (Eq (..)) +import Data.Foldable (toList) +import Data.Function ((&)) +import Data.Functor ((<&>)) +import Data.Int (Int) +import Data.Map.Strict qualified as Map +import Data.Monoid (Monoid (..)) +import Data.Ord (Ord (..)) +import Data.Semigroup (Semigroup (..)) +import Data.Sequence qualified as Seq +import Data.Set qualified as Set +import Data.String (IsString (..)) +import Data.Text.Short (ShortText) +import Data.Validity (Validity (..), declare, delve, trivialValidation) +import Data.Validity.Map () +import Data.Validity.Set () +import Data.Validity.Time () +import GHC.Generics (Generic) +import Logic +import Logic.Theory.Arithmetic (Zero) +import Logic.Theory.Ord (type (>)) + +-- import Numeric.Probability (Probability) +import Text.Show (Show) + +import Clustering.FrequentItemSet.LCM qualified as Clustering + +-- | A contiguous sequence of n terms +newtype Ngram = Ngram {unNgram :: ShortText} + deriving (Eq, Generic, Ord, IsString) + deriving newtype (Show) + +instance Validity Ngram where + validate = trivialValidation + +-- | A 'Root' is a set of `Ngram`s conveying the same meaning +-- (according to the analyst). 
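+--
+-- For example (hypothetical data, assuming @OverloadedStrings@ for `Ngram` literals),
+-- an analyst may gather two spellings of the same concept under a single root:
+--
+-- > Root{rootLabel = "DNA", rootSynonyms = Set.fromList ["desoxyribonucleic acid"]}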
+data Root = Root + { rootLabel :: Ngram + , rootSynonyms :: Set.Set Ngram + } + deriving (Eq, Generic, Ord, Show) + +instance IsString Root where + fromString s = + Root + { rootLabel = fromString s + , rootSynonyms = Set.empty + } + +instance Validity Root where + validate r = + mconcat + [ delve "rootLabel" (rootLabel r) + , declare + "The rootLabel is not a member of the rootSynonyms" + (Set.notMember (rootLabel r) (rootSynonyms r)) + , delve "rootSynonyms" (rootSynonyms r) + ] + +type Roots = Clustering.ItemSet Root +type Foundations = Set.Set Root + +data Document pos = Document + { documentPosition :: pos + -- ^ A position could be a date, a section, a page, an IP address, … + , documentRoots :: Map.Map Root () + -- , documentContent :: a + } + deriving (Eq, Generic, Show) +instance Validity pos => Validity (Document pos) + +type DocumentByRange range pos = Map.Map range (Seq.Seq (Document pos)) + +documentsByRange :: Ord range => (pos -> range) -> [Document pos] -> DocumentByRange range pos +documentsByRange mapKey docs = + Map.fromListWith + (<>) + [ (mapKey (documentPosition doc), Seq.singleton doc) + | doc <- docs + ] + +-- | "Clustering.FrequentItemSet.BruteForce" +-- and [the BF]("Clustering.FrequentItemSet.BruteForce") +-- and [the Doc]("Document") +data RootsOf docs = RootsOfAxiom + +-- @ +-- `clusterize` roots minSupp minSize docs +-- @ +-- returns for each range the clusters of `Document` +-- according to the frequent item set similarity. +-- +-- TODO: If a given period eventually ends up without any FIS, +-- we lower both the support and the size until we succeed in repopulating it. +clusterize :: + Show pos => + roots ::: Roots -> + minSupp ::: Int / minSupp > Zero -> + minSize ::: Int / minSize > Zero -> + docsByRange ::: (range :-> Seq.Seq (Document pos)) -> + range + :-> + -- Clustering.ClosedFrequentItemSets roots (RootsOf docsByRange) minSupp minSize ::: + Cluster + :-> Seq.Seq (Clustering.Transaction Root (Document pos)) +clusterize roots minSupp minSize (Named docsByRange) = + -- TODO: currently `Clustering.closedFrequentItemSets` does not return the `Transaction`s + -- supporting the closed FIS, maybe it should do it. + -- In the meantime, collect those after running `closedFrequentItemSets`. + docsByRange <&> \docs -> + let closedFISs :: [(Clustering.ItemSupport, Cluster)] = + Clustering.closedFrequentItemSets + minSupp + minSize + roots + ( RootsOfAxiom ... + [ documentRoots doc & Map.keys & Set.fromList + | doc <- docs & toList + ] + ) + & unName + in Map.fromListWith + (<>) + [ ( c + , Seq.singleton + Clustering.Transaction + { transactionData = doc + , transactionItems = documentRoots doc & Map.keys & Set.fromList + } + ) + | doc <- docs & toList + , (_supp, c :: Cluster) <- closedFISs + , Set.isSubsetOf c (documentRoots doc & Map.keys & Set.fromList) + ] + +type MapList k a = [(k, a)] +type (:->) = Map.Map +type Cluster = Clustering.ItemSet Root +infixr 0 :-> + +data Range pos = Range + { rangeMin :: pos + , rangeMax :: pos + -- , periodScales :: [Scale] + } + deriving (Eq, Ord, Generic, Show) +instance Validity pos => Validity (Range pos) + +type Vocabulary = Map.Map Root () diff --git a/src/Phylomemy/References.hs b/src/Phylomemy/References.hs new file mode 100644 index 0000000..4ebc12e --- /dev/null +++ b/src/Phylomemy/References.hs @@ -0,0 +1,34 @@ +module Phylomemy.References where + +data RefDrawMeScience +-- ^ David Chavalarias, Quentin Lobbe, Alexandre Delanoë. 
+-- Draw me Science - multi-level and multi-scale reconstruction of knowledge dynamics with phylomemies. 2021. ⟨[hal-03180347v2](https://hal.science/hal-03180347v2)⟩ +-- > { +-- > "URL": "https://hal.science/hal-03180347", +-- > "author": [ +-- > { +-- > "family": "Chavalarias", +-- > "given": "David" +-- > }, +-- > { +-- > "family": "Lobbe", +-- > "given": "Quentin" +-- > }, +-- > { +-- > "family": "Delanoë", +-- > "given": "Alexandre" +-- > } +-- > ], +-- > "id": "chavalarias:hal-03180347", +-- > "issued": { +-- > "date-parts": [ +-- > [ +-- > 2021, +-- > 3 +-- > ] +-- > ] +-- > }, +-- > "note": "working paper or preprint", +-- > "title": "Draw me Science - multi-level and multi-scale reconstruction of knowledge dynamics with phylomemies", +-- > "type": "manuscript" +-- > } diff --git a/src/Phylomemy/Similarity.hs b/src/Phylomemy/Similarity.hs new file mode 100644 index 0000000..5cd17fe --- /dev/null +++ b/src/Phylomemy/Similarity.hs @@ -0,0 +1,129 @@ +module Phylomemy.Similarity ( + type RootTuple (..), + rootTuple, + type RootMatrix, + type DocumentCoOccurences (), + type CoOccurences (..), + documentCoOccurences, + type Confidences (), + type Similarities, + confidences, +) where + +import Control.Applicative (Applicative (..)) +import Data.Bool +import Data.Eq (Eq) +import Data.Function (($)) +import Data.Functor ((<$>)) +import Data.Map.Strict qualified as Map +import Data.Maybe (Maybe (..)) +import Data.Monoid (Monoid (..)) +import Data.Ord (Ord (..)) +import Data.Ratio ((%)) +import Data.Semigroup (Semigroup (..), (<>)) +import Data.Validity (Validity (..), declare) +import GHC.Generics (Generic) +import Logic +import Numeric.Decimal (MonadThrow) +import Numeric.Natural (Natural) +import Text.Show (Show) +import Prelude (fromIntegral, (+)) + +import Numeric.Probability +import Phylomemy.Indexation + +-- | root-to-root co-occurrence matrix +type Count = Natural + +-- | Ordered tuple: the lesser `Root` comes first. +newtype RootTuple = RootTuple (Root, Root) + +rootTuple :: Root -> Root -> RootTuple +rootTuple i j = RootTuple (if i <= j then (i, j) else (j, i)) + +type RootMatrix a = Map.Map Root (Map.Map Root a) + +-- | Encodes a symmetric matrix of co-occurrences of roots inside 'Document's, +-- by mapping each root 'i' to a map of each root 'j' greater than or equal to 'i', +-- to the number of documents containing both 'i' and 'j'. +-- +-- The special case @(i == j)@ is thus the number of documents containing 'i'. +-- +-- When 'j' does not appear in any document containing 'i', +-- there is no entry for 'j' in the map under 'i'. +-- +-- When 'i' > 'j', +-- there is no entry for 'j' in the map under 'i', +-- because it belongs to the map under 'j'. +newtype CoOccurences = CoOccurences (RootMatrix Count) + deriving (Eq, Show, Generic) + +instance Semigroup CoOccurences where + (<>) (CoOccurences x) (CoOccurences y) = + CoOccurences (Map.unionWith (Map.unionWith (+)) x y) +instance Monoid CoOccurences where + mempty = CoOccurences Map.empty +instance Validity CoOccurences where + validate (CoOccurences is) = + mconcat + [ declare "The CoOccurences is valid" $ + Map.foldrWithKey + ( \i js iAcc -> + iAcc + && Map.foldrWithKey + ( \j c jAcc -> + jAcc && i <= j && 1 <= c + ) + True + js + ) + True + is + ] + +data DocumentCoOccurences document = DocumentCoOccurences + +-- | @(documentCoOccurences document)@ returns the trivial `CoOccurences` +-- for a single @(document)@, where all its `documentRoots` +-- co-occur with each other in one document (this one).
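+--
+-- For example (hypothetical document): if the @(document)@ has exactly two roots
+-- @x@ and @y@, with @x <= y@, the result maps @x@ to @{x: 1, y: 1}@
+-- and @y@ to @{y: 1}@; there is no entry for @(y, x)@
+-- because it already belongs to the map under @x@.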
+documentCoOccurences :: document ::: Document pos -> DocumentCoOccurences document ::: CoOccurences +documentCoOccurences (Named doc) = (DocumentCoOccurences ...) $ + CoOccurences $ + (`Map.mapWithKey` documentRoots doc) $ \i () -> + (`Map.mapMaybeWithKey` documentRoots doc) $ \j () -> + if i <= j then Just 1 else Nothing + +-- | A similarity defines what is considered to be meaningful relation between root terms. +type Similarities similarity = RootMatrix similarity + +data Confidences document = ConfidencesAxiom + +-- | First order / syntagmatic axis. +-- A confidence is a weak logic implication. +-- +-- Definition: in `Phylomemy.References.RefDrawMeScience`, « C.2 Similarity measures (²Φ) » +-- +-- > The similarity measure P(x, y) between n-grams x and y +-- > is a function of the number of documents that mention both of them. +-- > [Here] the similarity measure i[s] the confidence +-- > defined as the maximum of the two probabilities +-- > of having a term knowing the presence of the other in the same contextual unit +-- > @P(x, y) = max(P(x|y), P(y|x))@. +confidences :: + MonadThrow m => + DocumentCoOccurences document ::: CoOccurences -> + m (Confidences document ::: Similarities Probability) +confidences (Named (CoOccurences i2j2c)) = + (ConfidencesAxiom ...) + <$> Map.traverseWithKey + ( \i j2c -> + let ii = fromIntegral (j2c Map.! i) + in Map.traverseWithKey + ( \j c -> do + let ij = fromIntegral c + let jj = fromIntegral $ i2j2c Map.! j Map.! j + max <$> probability (ij % ii) <*> probability (ij % jj) + ) + j2c + ) + i2j2c diff --git a/src/Phylomemy/TemporalMatching.hs b/src/Phylomemy/TemporalMatching.hs new file mode 100644 index 0000000..f26ebe8 --- /dev/null +++ b/src/Phylomemy/TemporalMatching.hs @@ -0,0 +1,343 @@ +module Phylomemy.TemporalMatching where + +-- import Data.Traversable (traverse) +-- import Debug.Pretty.Simple (pTraceShow, pTraceShowId) +import Control.Monad (Monad (..), foldM, forM_, unless) +import Control.Monad.ST qualified as ST +import Data.Bool (otherwise) +import Data.Either (fromLeft) +import Data.Eq (Eq (..)) +import Data.Foldable (any, foldMap', toList) +import Data.Function (($), (&), (.)) +import Data.Functor (Functor (..), (<$>), (<&>)) +import Data.Int (Int) +import Data.List qualified as List +import Data.Map.Strict qualified as Map +import Data.Maybe (Maybe (..)) +import Data.Ord (Ord (..)) +import Data.Ord qualified as Ord +import Data.Ratio (Rational, (%)) +import Data.Scientific (toBoundedRealFloat) +import Data.Semigroup (Semigroup (..)) +import Data.Sequence (Seq) +import Data.Sequence qualified as Seq +import Data.Set (Set) +import Data.Set qualified as Set +import Data.Tree qualified as Tree +import Data.Tuple (uncurry) +import GHC.Stack (HasCallStack) +import Logic hiding ((/)) +import Numeric.Decimal qualified as Decimal +import Numeric.Probability +import Text.Show (Show) +import Prelude (Double, Num (..), fromIntegral, pi, tan, toRational, (/), (^)) + +import Clustering.FrequentItemSet.BruteForce qualified as Clustering +import Clustering.UnionFind.ST qualified as UnionFind +import Phylomemy.Indexation + +type Similarity = Probability + +cardinal :: Num i => Set a -> i +cardinal = fromIntegral . 
Set.size + +-- TODO: implement biased similarity from the paper +similarityJaccard :: Ord.Ord a => Set a -> Set a -> Similarity +similarityJaccard x y = + cardinal (Set.intersection x y) % cardinal (Set.union x y) + & probability + & Decimal.arithError + +-- | A `MaximalSpanningTree` (MST) is one (possibly out of many) +-- tree spanning across all the given `(range, cluster)` nodes, +-- with the maximal sum of edges' `Similarity` between them. +-- +-- ExplanationNote: https://en.wikipedia.org/wiki/Minimum_spanning_tree +-- +-- Viewing a phylomemy as a `MaximalSpanningTree` +-- is the crux of understanding how it is computed: +-- +-- - the `mstMinimalSimilarity` is the next `Similarity` +-- that will split the `MaximalSpanningTree` into a `MaximalSpanningForest` of two or more trees. +-- +-- - it explains what the "scale of a phylomemy" is: +-- merging clusters of the same range and same `MaximalSpanningTree` +-- when they still belong to the same `MaximalSpanningTree`. +-- +-- ImplementationNote: using a `Tree.Tree` to represent a `MaximalSpanningTree` +-- (instead of an adjacency edge map for instance) +-- is motivated by the need to implement `mstSplit`, +-- which needs to gather the `(range, cluster)` nodes +-- of the `MaximalSpanningForest` resulting from the cut, +-- which will then be filtered by `msfGlobalQuality`. +-- +-- TODO: "Inadequacies of Minimum Spanning Trees in Molecular Epidemiology" +-- https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3187300/ +-- +-- TODO: "Divide-and-conquer based all spanning tree generation algorithm of a simple connected graph" +-- https://www.sciencedirect.com/science/article/abs/pii/S0304397521006952 +-- +-- WarningNote: "The tree of one percent" +-- https://link.springer.com/content/pdf/10.1186/gb-2006-7-10-118.pdf +type MaximalSpanningTree range cluster = Tree.Tree (MSTNode range cluster) + +data MSTNode range cluster = MSTNode + { mstNodeSimilarity :: Similarity + -- ^ The `similarity` of the parent edge of this node, + -- i.e. of the only edge getting closer to the root node of the `MaximalSpanningTree`. + -- + -- ImplementationNote: `maximalSpanningForest` puts a `Similarity` of `1` for the root node, + -- and `mstSplit` leaves the `mstMinimalSimilarity` of the edge before splitting out the `MaximalSpanningTree`. + , mstNodeRangeCluster :: (range, cluster) + -- , nodeMSTMinSimilarity :: Min Similarity + } + deriving (Show) + +-- | A (disjoint) forest of `MaximalSpanningTree`s, +-- one spanning tree per connected component +-- of the given @(range, cluster)@ nodes. +type MaximalSpanningForest range cluster = + [MaximalSpanningTree range cluster] + +-- | @(`maximalSpanningForest` similarity clusters)@ +-- uses the Kruskal algorithm to find the maximal spanning trees +-- of the given @(clusters)@, using the given @(similarity)@ measure between clusters. +-- +-- ExplanationNote: https://en.wikipedia.org/wiki/Kruskal's_algorithm +maximalSpanningForest :: + forall range cluster doc. + Ord range => + Ord cluster => + {-similarityMeasure :::-} (cluster -> cluster -> Similarity) -> + {-clusters :::-} range :-> cluster :-> Seq (Clustering.Transaction Root doc) -> + MaximalSpanningForest range cluster +maximalSpanningForest similarity rangeToClusterToDocs = ST.runST do + -- DescriptionNote: create a `Point` for each given node, + -- each Point containing a root node and a maximal spanning tree.
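+ -- DescriptionNote: the descriptor of each union-find class is the
+ -- `MaximalSpanningTree` built so far for that class;
+ -- `UnionFind.unionWith` below grafts the tree of one class
+ -- onto the root of the other whenever a new edge is kept.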
+ rangeToClusterToPoint :: range :-> cluster :-> UnionFind.Point s (MaximalSpanningTree range cluster) <- + rangeToClusterToDocs + & Map.traverseWithKey \srcR -> + Map.traverseWithKey \srcC _docs -> + UnionFind.fresh $ + Tree.Node + MSTNode + { mstNodeSimilarity = proba1 + , mstNodeRangeCluster = (srcR, srcC) + } + [] + -- DescriptionNote: iterate from the greatest to the lowest similarity edge. + forM_ (allEdges & Map.toDescList) \(simil, edges) -> do + -- Iterate through all the edges of that `Similarity`. + forM_ (edges & toList) \((srcR, srcC), (dstR, dstC)) -> do + let srcPoint = rangeToClusterToPoint Map.! srcR Map.! srcC + let dstPoint = rangeToClusterToPoint Map.! dstR Map.! dstC + alreadyInSameMST <- UnionFind.equivalent srcPoint dstPoint + unless alreadyInSameMST do + -- DescriptionNote: merge the `MaximalSpanningTree`s of the two nodes of this edge (src -> dst), + -- so that they now belong to the same `MaximalSpanningTree`. + UnionFind.unionWith srcPoint dstPoint \(Tree.Node srcRoot srcForest) (Tree.Node dstRoot dstForest) -> + return $ + Tree.Node srcRoot $ + Tree.Node + MSTNode + { mstNodeSimilarity = simil + , mstNodeRangeCluster = mstNodeRangeCluster dstRoot + } + dstForest + : srcForest + rootForest :: [MaximalSpanningTree range cluster] <- + rangeToClusterToPoint + -- DescriptionNote: collect all the Points. + & Map.elems + & List.concatMap Map.elems + -- DescriptionNote: keep only the `MaximalSpanningTree`s + -- contained in non-redundant `Point`s. + & (`foldM` []) \acc p -> do + isRedundant <- UnionFind.redundant p + if isRedundant + then return acc + else UnionFind.descriptor p <&> (: acc) + return rootForest + where + -- Order `rangeToClusterToDocs` by `Similarity`, + -- which enables adding edges to the spanning tree in decreasing `Similarity` + -- and hence building *a* maximal spanning tree. + allEdges :: Similarity :-> Seq ((range, cluster), (range, cluster)) + allEdges = + Map.unionsWith + (Seq.><) + [ Map.singleton (similarity srcC dstC) (Seq.singleton (src, dst)) + | (srcR, srcClusterToDocuments) <- rangeToClusterToDocs & Map.toList + , (srcC, _docs) <- srcClusterToDocuments & Map.toList + , let src = (srcR, srcC) + , -- ExplanationNote: it does not matter whether lower or greater ranges are used for the destination nodes, + -- as they contain the very same undirected edges and thus `Similarity`, + -- hence would produce the same splitting `Similarity`s, + -- though not necessarily the same `MaximalSpanningTree`. + let (_, dstRangeToClusterToDocs) = Map.split srcR rangeToClusterToDocs + , (dstR, dstClusterToDocs) <- dstRangeToClusterToDocs & Map.toList + , (dstC, _docs) <- dstClusterToDocs & Map.toList + , let dst = (dstR, dstC) + ] + +-- | The "sea-level rise algorithm". +-- +-- See: in `Phylomemy.References.RefDrawMeScience`, +-- « C.5 The sea-level rise algorithm and its implementation in Gargantext » +msfSplit :: + HasCallStack => + forall range roots predictionMeasure.
+ Show range => + Ord range => + predictionMeasure ::: (Set (range, Cluster) -> Set (range, Cluster) -> Decimal.Arith Similarity) -> + roots ::: Set Root -> + MaximalSpanningForest range Cluster -> + MaximalSpanningForest range Cluster +msfSplit predictionMeasure roots = + loop (return proba0) [] + where + loop previousQuality doneBranches currentBranches = + -- pTraceShow (["msfSplit", "loop"], ("previousQuality", previousQuality), ("doneBranches", List.length doneBranches), ("todoBranches", List.length currentBranches)) $ + case currentBranches of + [] -> doneBranches + currentBranch : todoBranches -> + let splitBranches = mstSplit currentBranch + in -- pTraceShow (["msfSplit", "loop", "splitBranches"], ("size", Map.size splitBranches)) $ + if List.length splitBranches <= 1 + then loop previousQuality (doneBranches <> splitBranches) todoBranches + else letName (fmap mstNodes $ (doneBranches <> splitBranches) <> todoBranches) \mstToNodes -> + let splitQuality = msfGlobalQuality predictionMeasure roots mstToNodes + in if previousQuality < splitQuality + then loop splitQuality doneBranches (splitBranches <> todoBranches) + else loop previousQuality (doneBranches <> splitBranches) todoBranches + +mstSplit :: + forall range cluster. + HasCallStack => + Show range => + Show cluster => + Ord range => + Ord cluster => + MaximalSpanningTree range cluster -> + MaximalSpanningForest range cluster +mstSplit mst = + case mstMinimalSimilarity mst of + Nothing -> [mst] + Just minSimil -> cutMerge mst + where + cutMerge = uncurry (:) . cut + cut :: MaximalSpanningTree range cluster -> (MaximalSpanningTree range cluster, [MaximalSpanningTree range cluster]) + cut (Tree.Node node children) = + let (keptChildren, cutChildren) = + children & List.partition \tree -> + minSimil < mstNodeSimilarity (Tree.rootLabel tree) + in let cutChildrenRoots = cutChildren & List.concatMap cutMerge + in let (keptChildrenForest, cutChildrenTree) = List.unzip (cut <$> keptChildren) + in let cutChildrenTreeRoots = cutChildrenTree & List.concat & List.concatMap cutMerge + in ( Tree.Node node keptChildrenForest + , (cutChildrenRoots List.++ cutChildrenTreeRoots) + -- WarningNote: the root nodes of those new `MaximalSpanningTree`s + -- keep their `mstNodeSimilarity` at their cutting value, + -- which is lower than their `mstMinimalSimilarity`. + ) + +mstMinimalSimilarity :: MaximalSpanningTree range cluster -> Maybe Similarity +mstMinimalSimilarity (Tree.Node _rootNode rootBranches) + | List.null rootBranches = Nothing + | otherwise = + -- ExplanationNote: the root node of a `MaximalSpanningTree`, + -- being a root node, does not have a parent, + -- hence its `mstNodeSimilarity` must be ignored. + Just $ + List.minimum $ + rootBranches + <&> Tree.foldTree \node accs -> + List.minimum $ (mstNodeSimilarity node) : accs + +-- | @(`mstSplittingSimilarities` mst)@ +-- returns the `Similarity`s causing the given `MaximalSpanningTree` +-- to split further into more `MaximalSpanningTree`s. +mstSplittingSimilarities :: MaximalSpanningTree range cluster -> Set Similarity +mstSplittingSimilarities (Tree.Node _rootNode rootBranches) + | List.null rootBranches = Set.empty + | otherwise = rootBranches & foldMap' (Tree.foldTree \node accs -> Set.unions (Set.singleton (mstNodeSimilarity node) : accs)) + +-- | @(`mstNodes` branch)@ returns the nodes of the given @(branch)@. +mstNodes :: + HasCallStack => + Ord range => + Ord cluster => + MaximalSpanningTree range cluster -> + Set (range, cluster) +mstNodes = foldMap' (Set.singleton .
mstNodeRangeCluster) + +-- | The global quality of a `Phylomemy`. +-- +-- DescriptionNote: `msfGlobalQuality` counts the `(range, Cluster)` nodes of a branch, +-- even though a `(range, Cluster)` node can in itself gather several documents. +msfGlobalQuality :: + HasCallStack => + Ord range => + predictionMeasure ::: (Set (range, Cluster) -> Set (range, Cluster) -> Decimal.Arith Similarity) -> + roots ::: Set Root -> + mstToNodes ::: [Set (range, Cluster)] -> + Decimal.Arith Similarity +msfGlobalQuality (Named predictionMeasure) (Named roots) (Named mstToNodes) = + List.sum + [ probability (1 % cardinal roots) + * List.sum + [ probability (cardinal retrievedNodes % nodesTotal) + * predictionMeasure relevantNodes retrievedNodes + | retrievedNodes <- branches + ] + | root <- roots & toList + , -- CorrectnessNote: ignore branches not containing any requested `root` + let branches = mstToNodes & List.filter (any (\(_r, c) -> Set.member root c)) + , let nodesTotal = branches & List.map (cardinal @Int) & List.sum & fromIntegral + , let relevantNodes = branches & foldMap' (Set.filter (\(_r, c) -> Set.member root c)) + ] + +-- | The idea of the F-measure is to ensure that a classifier makes good +-- predictions of the relevant class (good precision) +-- in sufficiently large number (good recall) +-- on a target dataset. +-- +-- Just like precision and recall, +-- the F-measure ranges from 0 (worst value) +-- to 1 (best possible value). +-- +-- ExplanationNote: https://en.wikipedia.org/wiki/F-score +predictionMeasureF :: + HasCallStack => + Ord range => + {-lambda :::-} + + -- | Trade-off between `precision` and `recall`. + -- See https://en.wikipedia.org/wiki/Precision_and_recall + -- + -- > [It] predetermine[s] the desired shape of the phylomemy: a continent + -- > (i.e., one large branch) or an archipelago of elements of knowledge + -- > (i.e., many small branches). The estimation of `lambda` is left to the researcher’s + -- > discretion in light of her own expertise and research questions, which makes + -- > any phylomemy an artifact of the researcher’s perception + -- + -- For @(lambda == 0)@, only the `precision` counts, whereas for @(lambda == 1)@, only the `recall` counts. + Similarity -> + Set (range, Cluster) -> + Set (range, Cluster) -> + Decimal.Arith Similarity +predictionMeasureF lambda relevantNodes retrievedNodes + | lambda == proba0 = probability precision + | lambda == proba1 = probability recall + | otherwise = probability $ precision * recall * (1 + betaSquare) / (recall + betaSquare * precision) + where + precision = cardinal relevantRetrievedNodes % cardinal retrievedNodes + recall = cardinal relevantRetrievedNodes % cardinal relevantNodes + relevantRetrievedNodes = Set.intersection relevantNodes retrievedNodes + lambdaDouble = lambda & Decimal.toScientificDecimal & toBoundedRealFloat @Double & fromLeft 1 + -- ExplanationNote: the `tan` is just to spread `lambda` + -- Two commonly used values for β are: + -- - 2, which weighs recall higher than precision, + -- - and 0.5, which weighs recall lower than precision.
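+ -- For instance (illustrative value): @lambdaDouble == 0.5@ gives
+ -- @beta == tan (pi / 4) == 1@, i.e. the usual balanced F1 score
+ -- @2 * precision * recall / (precision + recall)@.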
+ beta = tan (lambdaDouble * pi / 2) + betaSquare :: Rational + betaSquare = beta ^ (2 :: Int) & toRational diff --git a/src/Prelude.hs b/src/Prelude.hs new file mode 100644 index 0000000..02bfe61 --- /dev/null +++ b/src/Prelude.hs @@ -0,0 +1,32 @@ +{-# LANGUAGE PackageImports #-} +module Prelude ( + -- module BasePrelude + module Prelude, + (^), + Bounded (..), + Double, + Enum, + Fractional (..), + Integral(..), + Num (..), + Rational, + Real, + error, + fromIntegral, + pi, + tan, + toRational, + undefined, + IO, + max, + min, +) +where + +import "base" Prelude as BasePrelude + +-- | Reverse function composition (do f first, then g) +(>.>) :: (a -> b) -> (b -> c) -> a -> c +infixr 9 >.> +(f >.> g) x = g (f x) +{-# INLINE (>.>) #-} diff --git a/tests/Clustering/FrequentItemSet/AprioriSpec.hs b/tests/Clustering/FrequentItemSet/AprioriSpec.hs new file mode 100644 index 0000000..5b214dd --- /dev/null +++ b/tests/Clustering/FrequentItemSet/AprioriSpec.hs @@ -0,0 +1,83 @@ +{-# LANGUAGE OverloadedLists #-} + +module Clustering.FrequentItemSet.AprioriSpec where + +import Control.Monad (Monad (..)) +import Data.List qualified as List +import Test.Syd +import Test.Syd.Validity + +import Clustering.FrequentItemSet.Apriori + +spec = do + describe "frequentItemSets" do + it "solves Takeaki Uno example" do + -- From https://research.nii.ac.jp/~uno/code/lcm.html#IntroductionstoFrequentItemsetMining + ( frequentItemSets + [ [1, 2, 5, 6, 7] + , [2, 3, 4, 5] + , [1, 2, 7, 8, 9] + , [1, 7, 9] + , [2, 7, 9] + , [2, 7, 9] -- Copy-paste typo on the original example + , [1, 9] -- Add this to increase the support of [1,9] because the original example is wrong… + , [2] + ] + 3 + ) + `shouldBe` [ [] + , [1] + , [1, 7] + , [1, 9] + , [2] + , [2, 7] + , [2, 7, 9] + , [2, 9] + , [7] + , [7, 9] + , [9] + ] + +{- +it "solves a singleton transaction" do + naiveClosedFrequentItemSets + [ [1] + ] 1 `shouldBe` + [ [1] + ] +it "solves a basic example" do + naiveClosedFrequentItemSets + [ [1] + , [1,2] + ] 1 `shouldBe` + [ [1] + , [1, 2] + ] +it "solves a another basic example" do + naiveClosedFrequentItemSets + [ [1] + , [1,2] + , [1,2] + ] 1 `shouldBe` + [ [1] + , [1, 2] + ] +it "" do + naiveClosedFrequentItemSets + [ [1,2,3,4,5,6] + , [2,3,5] + , [2,5] + , [1,2,4,5,6] + , [2,4] + , [1,4,6] + , [3,4,6] + ] 3 `shouldBe` + [ [4] + , [2] + , [2,4] + , [4,6] + , [2,5] + , [3] + , [1,4,6] + ] +-} diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=1.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=1.golden new file mode 100644 index 0000000..b43a91c --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=1.golden @@ -0,0 +1,26 @@ +fromList + [ ( fromList [ "a" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" ] + } + ] + ) + , ( fromList [ "a" , "b" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" ] + } + ] + ) + , ( fromList [ "b" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=2.golden new file mode 100644 index 0000000..83584c6 --- /dev/null +++ 
b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=1.minSupp=1.minSize=2.golden @@ -0,0 +1,34 @@ +fromList + [ ( fromList [ "a" , "b" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" , "c" ] + } + ] + ) + , ( fromList [ "a" , "b" , "c" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" , "c" ] + } + ] + ) + , ( fromList [ "a" , "c" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" , "c" ] + } + ] + ) + , ( fromList [ "b" , "c" ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ "a" , "b" , "c" ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=2.golden new file mode 100644 index 0000000..d3dc34e --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=2.golden @@ -0,0 +1,212 @@ +fromList + [ ( fromList [ 1 , 2 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 ] + } + ] + ) + , ( fromList [ 1 , 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 , 6 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 6 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 ] + } + ] + ) + , ( fromList [ 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 ] + } + ] + ) + , ( fromList [ 2 , 8 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 8 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 8 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 8 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = 
fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 3 , 6 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 3 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 3 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 5 , 11 ] + , fromList + [ Transaction + { transactionData = () , transactionItems = fromList [ 5 , 11 ] } + ] + ) + , ( fromList [ 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 8 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 8 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 8 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=3.golden new file mode 100644 index 0000000..d5311b3 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=3.golden @@ -0,0 +1,90 @@ +fromList + [ ( fromList [ 1 , 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 , 6 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 3 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 2 , 8 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 8 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 8 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 2 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + , ( fromList [ 3 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 8 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + ] \ No 
newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=4.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=4.golden new file mode 100644 index 0000000..a142f60 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=1.minSize=4.golden @@ -0,0 +1,18 @@ +fromList + [ ( fromList [ 1 , 3 , 6 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + ] + ) + , ( fromList [ 2 , 8 , 9 , 10 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 8 , 9 , 10 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=2.golden new file mode 100644 index 0000000..77f8bfb --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=2.golden @@ -0,0 +1,14 @@ +fromList + [ ( fromList [ 1 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 3 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=3.golden new file mode 100644 index 0000000..825e091 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=3.golden @@ -0,0 +1 @@ +fromList [] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=4.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=4.golden new file mode 100644 index 0000000..825e091 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=2.minSize=4.golden @@ -0,0 +1 @@ +fromList [] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=2.golden new file mode 100644 index 0000000..825e091 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=2.golden @@ -0,0 +1 @@ +fromList [] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=3.golden new file mode 100644 index 0000000..825e091 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=3.golden @@ -0,0 +1 @@ +fromList [] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=4.golden 
b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=4.golden new file mode 100644 index 0000000..825e091 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=HAL03500847T2.minSupp=3.minSize=4.golden @@ -0,0 +1 @@ +fromList [] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=2.golden new file mode 100644 index 0000000..a83c3f2 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=2.golden @@ -0,0 +1,152 @@ +fromList + [ ( fromList [ 1 , 2 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + , ( fromList [ 1 , 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + ] + ) + , ( fromList [ 1 , 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + , ( fromList [ 1 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + , Transaction + { transactionData = () , transactionItems = fromList [ 1 , 9 ] } + ] + ) + , ( fromList [ 2 , 5 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 3 , 4 , 5 ] + } + ] + ) + , ( fromList [ 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + ] + ) + , ( fromList [ 2 , 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + , ( fromList [ 2 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + ] + ) + , ( fromList [ 7 , 9 ] + , fromList + [ Transaction + 
{ transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=3.golden new file mode 100644 index 0000000..16aee8e --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=2.minSize=3.golden @@ -0,0 +1,42 @@ +fromList + [ ( fromList [ 1 , 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + ] + ) + , ( fromList [ 1 , 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + , ( fromList [ 2 , 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=2.golden new file mode 100644 index 0000000..89f25e1 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=2.golden @@ -0,0 +1,104 @@ +fromList + [ ( fromList [ 1 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + ] + ) + , ( fromList [ 1 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + , Transaction + { transactionData = () , transactionItems = fromList [ 1 , 9 ] } + ] + ) + , ( fromList [ 2 , 7 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 5 , 6 , 7 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + ] + ) + , ( fromList [ 2 , 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + , ( fromList [ 2 
, 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + ] + ) + , ( fromList [ 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=3.golden new file mode 100644 index 0000000..3f787ea --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allClosedFrequentItemSets/db=TakeakiUno.minSupp=3.minSize=3.golden @@ -0,0 +1,18 @@ +fromList + [ ( fromList [ 2 , 7 , 9 ] + , fromList + [ Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 2 , 7 , 9 ] + } + , Transaction + { transactionData = () + , transactionItems = fromList [ 1 , 2 , 7 , 8 , 9 ] + } + ] + ) + ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=2.golden new file mode 100644 index 0000000..eb6a71d --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=2.golden @@ -0,0 +1,6 @@ +[ fromList [] +, fromList [ 1 ] +, fromList [ 1 , 7 ] +, fromList [ 2 ] +, fromList [ 7 ] +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=3.golden new file mode 100644 index 0000000..d5b0181 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=HAL03500847T2.minSupp=3.golden @@ -0,0 +1 @@ +[ fromList [] ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=2.golden b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=2.golden new file mode 100644 index 0000000..1879d13 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=2.golden @@ -0,0 +1,17 @@ +[ fromList [] +, fromList [ 1 ] +, fromList [ 1 , 2 ] +, fromList [ 1 , 2 , 7 ] +, fromList [ 1 , 7 ] +, fromList [ 1 , 7 , 9 ] +, fromList [ 1 , 9 ] +, fromList [ 2 ] +, fromList [ 2 , 5 ] +, fromList [ 2 , 7 ] +, fromList [ 2 , 7 , 9 ] +, fromList [ 2 , 9 ] +, fromList [ 5 ] +, fromList [ 7 ] +, fromList [ 7 , 9 ] +, fromList [ 9 ] +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=3.golden b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=3.golden new file mode 100644 index 0000000..7bdd229 --- /dev/null +++ 
b/tests/Clustering/FrequentItemSet/BruteForce/allFrequentItemSets/db=TakeakiUno.minSupp=3.golden @@ -0,0 +1,12 @@ +[ fromList [] +, fromList [ 1 ] +, fromList [ 1 , 7 ] +, fromList [ 1 , 9 ] +, fromList [ 2 ] +, fromList [ 2 , 7 ] +, fromList [ 2 , 7 , 9 ] +, fromList [ 2 , 9 ] +, fromList [ 7 ] +, fromList [ 7 , 9 ] +, fromList [ 9 ] +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/associationRules/TakeakiUno.golden.old b/tests/Clustering/FrequentItemSet/BruteForce/associationRules/TakeakiUno.golden.old new file mode 100644 index 0000000..7de99fe --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/associationRules/TakeakiUno.golden.old @@ -0,0 +1,1074 @@ +[ [] +, [] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = 
fromList [ 2 , 6 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 7 ] + , associationRuleConfidence = 
1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 7 , 9 ] + , associationRuleConfidence = 
1.0000000000000000000 + , associationRuleConsequence = fromList [ 8 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 , 8 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 7 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 7 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 8 ] + , 
associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 2 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 8 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 2 ] + } + ] +, [] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 
1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 ] + , associationRuleConfidence = 0.7500000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 1 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 7 ] + } + ] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , 
associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 1 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 1 ] + , associationRuleConfidence = 0.7500000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + ] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 4 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 3 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 4 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 3 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 4 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 3 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 3 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 4 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 3 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 4 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 3 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 4 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 4 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 4 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 3 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 3 ] + } + ] 
+, [ AssociationRule + { associationRuleCause = fromList [ 2 , 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList 
[ 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 7 ] + , associationRuleConfidence = 0.8000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 7 ] + , associationRuleConfidence = 0.7500000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 2 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 7 , 9 ] + , associationRuleConfidence = 0.7500000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 2 , 8 ] + , associationRuleConfidence = 
1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 2 ] + } + ] +, [] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 4 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 3 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 4 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 3 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 4 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 3 , 5 ] + } + , AssociationRule + { associationRuleCause = fromList [ 4 , 5 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 3 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 3 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + ] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 4 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + ] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 5 , 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 5 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 6 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 , 7 ] + } + , AssociationRule + { associationRuleCause = fromList [ 6 , 7 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 5 ] + } + ] +, [] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 6 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 7 , 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = 
fromList [ 7 , 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 8 , 9 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [ AssociationRule + { associationRuleCause = fromList [ 7 ] + , associationRuleConfidence = 0.8000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + , AssociationRule + { associationRuleCause = fromList [ 9 ] + , associationRuleConfidence = 0.8000000000000000000 + , associationRuleConsequence = fromList [ 7 ] + } + ] +, [] +, [ AssociationRule + { associationRuleCause = fromList [ 8 ] + , associationRuleConfidence = 1.0000000000000000000 + , associationRuleConsequence = fromList [ 9 ] + } + ] +, [] +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=2.minConf=75%.golden b/tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=2.minConf=75%.golden new file mode 100644 index 0000000..3d25dba --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=2.minConf=75%.golden @@ -0,0 +1,67 @@ +[ [] +, [] +, [] +, [ Association + { associationCause = fromList [ 1 , 2 ] + , associationConfidence = 1.0000000000000000000 + , associationConsequence = fromList [ 7 ] + } + ] +, [ Association + { associationCause = fromList [ 1 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 7 ] + } + ] +, [] +, [ Association + { associationCause = fromList [ 1 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 9 ] + } + ] +, [] +, [ Association + { associationCause = fromList [ 5 ] + , associationConfidence = 1.0000000000000000000 + , associationConsequence = fromList [ 2 ] + } + ] +, [ Association + { associationCause = fromList [ 7 ] + , associationConfidence = 0.8000000000000000000 + , associationConsequence = fromList [ 2 ] + } + ] +, [ Association + { associationCause = fromList [ 2 , 7 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 9 ] + } + , Association + { associationCause = fromList [ 2 , 9 ] + , associationConfidence = 1.0000000000000000000 + , associationConsequence = fromList [ 7 ] + } + , Association + { associationCause = fromList [ 7 , 9 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 2 ] + } + ] +, [] +, [] +, [] +, [ Association + { associationCause = fromList [ 7 ] + , associationConfidence = 0.8000000000000000000 + , associationConsequence = fromList [ 9 ] + } + , Association + { associationCause = fromList [ 9 ] + , associationConfidence = 0.8000000000000000000 + , associationConsequence = fromList [ 7 ] + } + ] +, [] +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=3.minConf=75%.golden b/tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=3.minConf=75%.golden new file mode 100644 index 0000000..17fb536 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForce/associationRules/db=TakeakiUno.minSupp=3.minConf=75%.golden @@ -0,0 +1,52 @@ +[ [] +, [] +, [ Association + { associationCause = fromList [ 1 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 7 ] + } + ] +, [ Association + { associationCause = fromList [ 1 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 9 ] + } + ] 
+, [] +, [ Association + { associationCause = fromList [ 7 ] + , associationConfidence = 0.8000000000000000000 + , associationConsequence = fromList [ 2 ] + } + ] +, [ Association + { associationCause = fromList [ 2 , 7 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 9 ] + } + , Association + { associationCause = fromList [ 2 , 9 ] + , associationConfidence = 1.0000000000000000000 + , associationConsequence = fromList [ 7 ] + } + , Association + { associationCause = fromList [ 7 , 9 ] + , associationConfidence = 0.7500000000000000000 + , associationConsequence = fromList [ 2 ] + } + ] +, [] +, [] +, [ Association + { associationCause = fromList [ 7 ] + , associationConfidence = 0.8000000000000000000 + , associationConsequence = fromList [ 9 ] + } + , Association + { associationCause = fromList [ 9 ] + , associationConfidence = 0.8000000000000000000 + , associationConsequence = fromList [ 7 ] + } + ] +, [] +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/BruteForceSpec.hs b/tests/Clustering/FrequentItemSet/BruteForceSpec.hs new file mode 100644 index 0000000..50fe308 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/BruteForceSpec.hs @@ -0,0 +1,113 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE OverloadedStrings #-} + +module Clustering.FrequentItemSet.BruteForceSpec where + +import Control.Monad (forM_, return) +import Data.Function (on, ($), (&), (.)) +import Data.Functor ((<$>), (<&>)) +import Data.Int (Int) +import Data.List qualified as List +import Data.Ord (Ord (..)) +import Data.Ratio (Rational, (%)) +import Data.Semigroup (Semigroup (..)) +import Data.Sequence qualified as Seq +import Data.Set qualified as Set +import Data.Text.Short qualified as TextS +import GHC.IsList (toList) +import Logic +import Numeric.Probability (assertProbability) +import System.FilePath ((<.>)) +import Test.Syd +import Text.Show (Show (..)) +import Prelude (Num) + +import Clustering.FrequentItemSet.BruteForce +import Utils + +-- | From https://research.nii.ac.jp/~uno/code/lcm.html#IntroductionstoFrequentItemsetMining +databaseTakeakiUno :: Ord item => Num item => [Transaction item ()] +databaseTakeakiUno = + Transaction () + <$> [ [1, 2, 5, 6, 7] + , [2, 3, 4, 5] + , [1, 2, 7, 8, 9] + , [1, 7, 9] + , [2, 7, 9] + , [2, 7, 9] -- Copy-paste typo on the original example + , [1, 9] -- Add this to increase the support of [1,9] because the original example is wrong… + , [2] + ] + +-- | From https://hal.science/hal-03500847 +databaseHAL03500847T2 :: Ord item => Num item => [Transaction item ()] +databaseHAL03500847T2 = + Transaction () + <$> [ [1, 3, 7, 6] + , [1, 2, 7] + , [2, 8, 9, 10] + , [5, 11] + ] + +database1 :: [Transaction TextS.ShortText ()] +database1 = + Transaction () + <$> [["a", "b"]] + +spec :: Spec +spec = do + {- + describe "allFrequentItemSets" do + forM_ ([2 .. 3] :: [Int]) \minSupp -> + goldenShow ("db=TakeakiUno" <.> "minSupp=" <> show minSupp) $ + allFrequentItemSets @Int + (unitName databaseTakeakiUno) + (assertStrictlyPositive minSupp) + forM_ ([2 .. 3] :: [Int]) \minSupp -> + goldenShow ("db=HAL03500847T2" <.> "minSupp=" <> show minSupp) $ + allFrequentItemSets @Int + (unitName databaseHAL03500847T2) + (assertStrictlyPositive minSupp) + + describe "associationRules" do + forM_ ([2 .. 
3] :: [Int]) \minSupp -> + forM_ ([(75 % 100)] :: [Rational]) \minConf -> + goldenShow + ("db=TakeakiUno" <.> "minSupp=" <> show minSupp <.> "minConf=75%") + [ associationRules + fis + (unitName databaseTakeakiUno) + (unitName (assertProbability minConf)) + | fis <- + allFrequentItemSets @Int + (unitName databaseTakeakiUno) + (assertStrictlyPositive minSupp) + ] + describe "allClosedFrequentItemSets" do + forM_ ([2 .. 3] :: [Int]) \minSupp -> + forM_ ([2 .. 3] :: [Int]) \minSize -> + goldenShow ("db=TakeakiUno" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize) $ + allClosedFrequentItemSets @Int + (assertStrictlyPositive minSupp) + (assertStrictlyPositive minSize) + (unitName (databaseTakeakiUno @Int)) + & unName + forM_ ([1 .. 3] :: [Int]) \minSupp -> + forM_ ([2 .. 4] :: [Int]) \minSize -> + goldenShow ("db=HAL03500847T2" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize) $ + allClosedFrequentItemSets @Int + (assertStrictlyPositive minSupp) + (assertStrictlyPositive minSize) + (unitName (databaseHAL03500847T2 @Int)) + & unName + describe "allClosedFrequentItemSets" do + forM_ ([1 .. 1] :: [Int]) \minSupp -> + forM_ ([1 .. 1] :: [Int]) \minSize -> + goldenShow ("db=1" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize) $ + allClosedFrequentItemSets @TextS.ShortText + (assertStrictlyPositive minSupp) + (assertStrictlyPositive minSize) + (unitName database1) + & unName + -} + return () diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=1.golden new file mode 100644 index 0000000..6b5db5c --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=1.golden @@ -0,0 +1 @@ +[ ( 1 , fromList [ 10 , 20 , 30 ] ) ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=2.golden new file mode 100644 index 0000000..6b5db5c --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=1.minSize=2.golden @@ -0,0 +1 @@ +[ ( 1 , fromList [ 10 , 20 , 30 ] ) ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=1.golden new file mode 100644 index 0000000..0637a08 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=1.golden @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=2.golden new file mode 100644 index 0000000..0637a08 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=1.minSupp=2.minSize=2.golden @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=1.golden new file mode 100644 index 0000000..75430b1 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=1.golden @@ -0,0 +1,3 @@ +[ ( 2 , fromList [ 10 , 20 ] ) +, ( 1 , fromList [ 10 , 20 , 30 ] 
) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=2.golden new file mode 100644 index 0000000..75430b1 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=1.minSize=2.golden @@ -0,0 +1,3 @@ +[ ( 2 , fromList [ 10 , 20 ] ) +, ( 1 , fromList [ 10 , 20 , 30 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=1.golden new file mode 100644 index 0000000..6c548ef --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=1.golden @@ -0,0 +1 @@ +[ ( 2 , fromList [ 10 , 20 ] ) ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=2.golden new file mode 100644 index 0000000..6c548ef --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=2.minSupp=2.minSize=2.golden @@ -0,0 +1 @@ +[ ( 2 , fromList [ 10 , 20 ] ) ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=1.golden new file mode 100644 index 0000000..1d768b5 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=1.golden @@ -0,0 +1,4 @@ +[ ( 3 , fromList [ 10 ] ) +, ( 2 , fromList [ 10 , 20 ] ) +, ( 1 , fromList [ 10 , 20 , 30 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=2.golden new file mode 100644 index 0000000..75430b1 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=1.minSize=2.golden @@ -0,0 +1,3 @@ +[ ( 2 , fromList [ 10 , 20 ] ) +, ( 1 , fromList [ 10 , 20 , 30 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=1.golden new file mode 100644 index 0000000..83c0d27 --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=1.golden @@ -0,0 +1 @@ +[ ( 3 , fromList [ 10 ] ) , ( 2 , fromList [ 10 , 20 ] ) ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=2.golden new file mode 100644 index 0000000..6c548ef --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=3.minSupp=2.minSize=2.golden @@ -0,0 +1 @@ +[ ( 2 , fromList [ 10 , 20 ] ) ] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=1.golden new file mode 100644 index 0000000..4a7e45c --- /dev/null +++ 
b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=1.golden @@ -0,0 +1,12 @@ +[ ( 5 , fromList [ 2 ] ) +, ( 5 , fromList [ 4 ] ) +, ( 3 , fromList [ 2 , 4 ] ) +, ( 4 , fromList [ 2 , 5 ] ) +, ( 4 , fromList [ 4 , 6 ] ) +, ( 3 , fromList [ 1 , 4 , 6 ] ) +, ( 2 , fromList [ 1 , 2 , 4 , 5 , 6 ] ) +, ( 3 , fromList [ 3 ] ) +, ( 2 , fromList [ 2 , 3 , 5 ] ) +, ( 2 , fromList [ 3 , 4 , 6 ] ) +, ( 1 , fromList [ 1 , 2 , 3 , 4 , 5 , 6 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=2.golden new file mode 100644 index 0000000..941913b --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=1.minSize=2.golden @@ -0,0 +1,9 @@ +[ ( 3 , fromList [ 2 , 4 ] ) +, ( 4 , fromList [ 2 , 5 ] ) +, ( 4 , fromList [ 4 , 6 ] ) +, ( 3 , fromList [ 1 , 4 , 6 ] ) +, ( 2 , fromList [ 1 , 2 , 4 , 5 , 6 ] ) +, ( 2 , fromList [ 2 , 3 , 5 ] ) +, ( 2 , fromList [ 3 , 4 , 6 ] ) +, ( 1 , fromList [ 1 , 2 , 3 , 4 , 5 , 6 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=1.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=1.golden new file mode 100644 index 0000000..77e618f --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=1.golden @@ -0,0 +1,11 @@ +[ ( 5 , fromList [ 2 ] ) +, ( 5 , fromList [ 4 ] ) +, ( 3 , fromList [ 2 , 4 ] ) +, ( 4 , fromList [ 2 , 5 ] ) +, ( 4 , fromList [ 4 , 6 ] ) +, ( 3 , fromList [ 1 , 4 , 6 ] ) +, ( 2 , fromList [ 1 , 2 , 4 , 5 , 6 ] ) +, ( 3 , fromList [ 3 ] ) +, ( 2 , fromList [ 2 , 3 , 5 ] ) +, ( 2 , fromList [ 3 , 4 , 6 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=2.golden b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=2.golden new file mode 100644 index 0000000..f86a66f --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCM/closedFrequentItemSets/db=4.minSupp=2.minSize=2.golden @@ -0,0 +1,8 @@ +[ ( 3 , fromList [ 2 , 4 ] ) +, ( 4 , fromList [ 2 , 5 ] ) +, ( 4 , fromList [ 4 , 6 ] ) +, ( 3 , fromList [ 1 , 4 , 6 ] ) +, ( 2 , fromList [ 1 , 2 , 4 , 5 , 6 ] ) +, ( 2 , fromList [ 2 , 3 , 5 ] ) +, ( 2 , fromList [ 3 , 4 , 6 ] ) +] \ No newline at end of file diff --git a/tests/Clustering/FrequentItemSet/LCMSpec.hs b/tests/Clustering/FrequentItemSet/LCMSpec.hs new file mode 100644 index 0000000..160a52f --- /dev/null +++ b/tests/Clustering/FrequentItemSet/LCMSpec.hs @@ -0,0 +1,149 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE OverloadedStrings #-} + +module Clustering.FrequentItemSet.LCMSpec where + +import Control.Monad (Monad (..), forM_) +import Data.Array.Base qualified as Array +import Data.Bool (Bool (..), (&&)) +import Data.Eq (Eq (..)) +import Data.Function (($), (.)) +import Data.Functor ((<$>)) +import Data.GenValidity +import Data.GenValidity.Map () +import Data.GenValidity.Set () +import Data.GenValidity.Text () +import Data.GenValidity.Time () +import Data.Int (Int) +import Data.List qualified as List +import Data.Map.Strict qualified as Map +import Data.Ord (Down (..), comparing, (<=)) +import Data.Set (Set) +import Data.Set qualified as Set +import Data.Text.Short (ShortText) +import Data.Text.Short qualified as ShortText +import Data.Tuple 
(snd) +import Data.Validity +import Data.Validity.Map () +import Data.Validity.Set () +import Data.Validity.Text () +import Numeric.Decimal (Decimal (..), unwrapDecimal) +import System.FilePath ((<.>)) +import Test.Syd +import Test.Syd.Validity +import Text.Show (Show (..)) + +import Logic + +import Clustering.FrequentItemSet.LCM +import Utils + +-- * Type 'Transaction' +data Transaction a = Transaction a + +-- ** Type 'TransactionItems' +newtype TransactionItems a = TransactionItems {unTransactionItems :: [a]} + deriving (Eq, Show) +instance (Eq a, Validity a) => Validity (TransactionItems a) where + validate (TransactionItems is) = + mconcat + [ delve "transaction item" is + , declare + "All transaction items are different" + (List.length (List.nub is) == List.length is) + ] +instance (GenValid a, Eq a) => GenValid (TransactionItems a) where + genValid = TransactionItems . List.nub <$> genValid + shrinkValid = (TransactionItems <$>) . shrinkValid . unTransactionItems + +databases :: [(ShortText, [Set Int])] +databases = + [ "1" := [[10, 20, 30]] + , "2" := [[10, 20, 30], [10, 20]] + , "3" := [[10, 20, 30], [10, 20], [10]] + , "4" + := [ [1, 2, 3, 4, 5, 6] + , [2, 3, 5] + , [2, 5] + , [1, 2, 4, 5, 6] + , [2, 4] + , [1, 4, 6] + , [3, 4, 6] + ] + ] + +spec :: Spec +spec = do + {- + genValidSpec @(TransactionItems Item) + it "solves a singleton transaction" do + runLCMmatrix + [ [1] + ] + 1 + `shouldBe` [ [1, 1] + ] + it "solves a basic example" do + runLCMmatrix + [ [1] + , [1, 2] + ] + 1 + `shouldBe` [ [2, 1] + , [1, 1, 2] + ] + it "solves a another basic example" do + runLCMmatrix + [ [1] + , [1, 2] + , [1, 2] + ] + 1 + `shouldBe` [ [3, 1] + , [2, 1, 2] + ] + it "solves HLCM's example" do + runLCMmatrix + [ [1, 2, 3, 4, 5, 6] + , [2, 3, 5] + , [2, 5] + , [1, 2, 4, 5, 6] + , [2, 4] + , [1, 4, 6] + , [3, 4, 6] + ] + 3 + `shouldBe` [ [5, 4] + , [5, 2] + , [3, 2, 4] + , [4, 4, 6] + , [4, 2, 5] + , [3, 3] + , [3, 1, 4, 6] + ] + describe "sortFrq" do + forM_ (List.zip databases [1 :: Int ..]) \(db, dbI) -> do + let maxItem = List.maximum (Set.findMax <$> db) + let itemToSupp = histogram (0, maxItem) db + let (lo, hi) = Array.bounds itemToSupp + let lstVal = [(i, itemToSupp Array.! i) | i <- [lo .. hi]] + goldenShow ("db" <> show dbI) $ + List.sortBy (comparing (Down . snd)) lstVal + -- sortFrq lstVal + describe "permut" do + forM_ (List.zip databases [1 :: Int ..]) \(db, dbI) -> do + let maxItem = List.maximum (Set.findMax <$> db) + let itemToSupp = histogram (0, maxItem) db + goldenShow ("db" <> show dbI) $ + permut itemToSupp + -} + describe "closedFrequentItemSets" do + forM_ databases \(dbName, db) -> do + letName db $ \dbNamed -> + forM_ ([1 .. 2] :: [Int]) \minSupp -> + forM_ ([1 .. 
2] :: [Int]) \minSize -> + goldenShow ("db=" <> ShortText.unpack dbName <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize) $ + allClosedFrequentItemSets + (assertStrictlyPositive minSupp) + (assertStrictlyPositive minSize) + dbNamed diff --git a/tests/Phylomemy/IndexationSpec.hs b/tests/Phylomemy/IndexationSpec.hs new file mode 100644 index 0000000..1f3de38 --- /dev/null +++ b/tests/Phylomemy/IndexationSpec.hs @@ -0,0 +1,63 @@ +{-# OPTIONS_GHC -Wno-orphans #-} + +module Phylomemy.IndexationSpec where + +import Data.Eq (Eq) +import Data.Function ((.)) +import Data.Functor ((<$>)) +import Data.GenValidity +import Data.GenValidity.Map () +import Data.GenValidity.Sequence () +import Data.GenValidity.Set () +import Data.GenValidity.Text () +import Data.GenValidity.Time () +import Data.Int (Int) +import Data.Ord (Ord) +import Data.Sequence qualified as Seq +import Data.Text.Short (ShortText) +import Data.Text.Short qualified as ShortText +import Data.Time (UTCTime) +import Data.Validity.Map () +import Data.Validity.Set () +import Data.Validity.Text () +import GHC.Generics (Generic) +import Test.Syd +import Test.Syd.Validity +import Text.Show (Show (..)) +import Prelude (Num) + +import Phylomemy + +import Clustering.FrequentItemSet.BruteForce qualified as Clustering + +instance Validity ShortText where + validate = trivialValidation +instance GenValid ShortText where + genValid = ShortText.fromText <$> genValid + shrinkValid = (ShortText.fromText <$>) . shrinkValid . ShortText.toText +instance GenValid Ngram +instance GenValid Root +instance (Validity pos, GenValid pos) => GenValid (Document pos) +instance GenValid CoOccurences where + genValid = genValidStructurallyWithoutExtraChecking + shrinkValid = shrinkValidStructurallyWithoutExtraFiltering +instance (Ord item, Validity item, Validity a, GenValid item, GenValid a) => GenValid (Clustering.Transaction item a) +instance (Ord pos, Validity pos, GenValid pos) => GenValid (Range pos) + +newtype Pos = Pos Int + deriving (Eq, Ord, Show, Num, Generic) +instance ShowHuman Pos where + showHuman (Pos x) = show x +instance Validity Pos +instance GenValid Pos + +type Rang = Range Pos + +spec :: Spec +spec = do + genValidSpec @(Document UTCTime) + monoidSpec @CoOccurences + genValidSpec @Cluster + genValidSpec @(Clustering.Transaction Root (Document ())) + genValidSpec @(Rang :-> Clustering.Transaction Root (Document ())) + genValidSpec @(Rang :-> Cluster :-> Seq.Seq (Clustering.Transaction Root (Document Pos))) diff --git a/tests/Phylomemy/SimilaritySpec.hs b/tests/Phylomemy/SimilaritySpec.hs new file mode 100644 index 0000000..e46020a --- /dev/null +++ b/tests/Phylomemy/SimilaritySpec.hs @@ -0,0 +1,36 @@ +{-# OPTIONS_GHC -Wno-orphans #-} + +module Phylomemy.SimilaritySpec where + +import Data.Function ((.)) +import Data.Functor ((<$>)) +import Data.GenValidity.Map () +import Data.GenValidity.Set () +import Data.GenValidity.Text () +import Data.GenValidity.Time () +import Data.Validity.Map () +import Data.Validity.Set () +import Data.Validity.Text () +import Numeric.Decimal (Decimal (..), unwrapDecimal) +import Numeric.Probability (Probability, ProbabilityBounded (..)) +import Test.Syd +import Test.Syd.Validity + +import Phylomemy +import Phylomemy.IndexationSpec () + +instance GenValid ProbabilityBounded where + genValid = genWordX + shrinkValid = (ProbabilityBounded <$>) . shrinkValid . unProbabilityBounded +instance GenValid Probability where + genValid = Decimal <$> genValid + shrinkValid = (Decimal <$>) . shrinkValid . 
unwrapDecimal + +-- instance GenValid (Similarities Probability) where +-- genValid = genValidStructurallyWithoutExtraChecking +-- shrinkValid = shrinkValidStructurallyWithoutExtraFiltering + +spec :: Spec +spec = do + genValidSpec @Probability + genValidSpec @(Similarities Probability) diff --git a/tests/Phylomemy/TemporalMatchingSpec.hs b/tests/Phylomemy/TemporalMatchingSpec.hs new file mode 100644 index 0000000..eb9421e --- /dev/null +++ b/tests/Phylomemy/TemporalMatchingSpec.hs @@ -0,0 +1,161 @@ +{-# LANGUAGE OverloadedLists #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ParallelListComp #-} +{-# OPTIONS_GHC -Wno-orphans #-} + +module Phylomemy.TemporalMatchingSpec where + +import Control.Monad (Monad (..), foldM, foldM_, forM_, void) +import Data.Function (($), (&), (.)) +import Data.Functor (Functor (..), (<$>), (<&>)) +import Data.GenValidity +import Data.GenValidity.Map () +import Data.GenValidity.Sequence () +import Data.GenValidity.Set () +import Data.GenValidity.Text () +import Data.GenValidity.Time () +import Data.Int (Int) +import Data.List qualified as List +import Data.Map.Strict qualified as Map +import Data.Maybe (Maybe (..), fromJust) +import Data.Ord (Ord) +import Data.Sequence qualified as Seq +import Data.Set qualified as Set +import Data.Text.Short (ShortText) +import Data.Text.Short qualified as ShortText +import Data.Time (UTCTime) +import Data.Tree qualified as Tree +import Data.Validity.Map () +import Data.Validity.Set () +import Data.Validity.Text () +import Debug.Pretty.Simple (pTraceShow, pTraceShowId) +import GHC.IsList (toList) +import Logic +import Numeric (showFFloat) +import Numeric.Natural (Natural) +import System.FilePath ((<.>)) +import Test.Syd +import Test.Syd.Validity +import Text.Show (Show (..)) +import Prelude (Double, Integral (..), Num (..), Rational, fromRational) + +import Phylomemy +import Phylomemy.IndexationSpec (Pos (..), Rang) + +import Utils + +rangeToDocs0 :: Pos :-> Seq.Seq (Document Pos) +rangeToDocs0 = + Map.fromList + [ ( Pos rangeIndex + , Seq.fromList + [ Document + { documentPosition = Pos (2 * rangeIndex + 3 * docIndex) + , documentRoots = Map.fromList [(r, ()) | r <- roots] + } + | roots <- docs + | docIndex <- [1 ..] + ] + ) + | docs <- + [ [["a", "b", "c"], ["a", "d", "e"], ["e", "f", "g"]] + , [["a", "b"], ["d", "f"], ["a", "d"]] + , [["f"], ["d", "f"], ["f", "g", "a"]] + , [["b", "c", "e"], ["a", "d", "e"], ["a", "b", "c"]] + , [["d", "f", "g"], ["b", "f"], ["a", "c", "d"], ["a", "f"]] + , [["c", "d", "g"], ["b", "c", "g"], ["a", "b", "c"], ["e", "g"]] + ] + | rangeIndex <- [1 ..] 
+ ] + +{- +rangeToDocs0 :: Pos :-> Seq.Seq (Document Pos) +rangeToDocs0 = + Map.fromListWith + (<>) + [ (Pos (pos - (pos `rem` 2)), Seq.singleton doc) + | doc <- docs0 & toList + , let Pos pos = documentPosition doc + ] +-} + +spec :: Spec +spec = do + {- + describe "splitMaximalSpanningTree" do + let mst0 :: MaximalSpanningTree Int Int = + Tree.Node MSTNode{mstNodeRangeCluster=(2,1), mstNodeSimilarity=proba1} + [ Tree.Node MSTNode{mstNodeRangeCluster=(1,1), mstNodeSimilarity=proba0} [] + , Tree.Node MSTNode{mstNodeRangeCluster=(1,2), mstNodeSimilarity=assertProbability 0.2} [] + , Tree.Node MSTNode{mstNodeRangeCluster=(1,3), mstNodeSimilarity=assertProbability 0.3} + [ Tree.Node MSTNode{mstNodeRangeCluster=(2,3), mstNodeSimilarity=assertProbability 0.3} [] + , Tree.Node MSTNode{mstNodeRangeCluster=(2,4), mstNodeSimilarity=proba0} + [ Tree.Node MSTNode{mstNodeRangeCluster=(1,4), mstNodeSimilarity=assertProbability 0.2} + [ Tree.Node MSTNode{mstNodeRangeCluster=(2,2), mstNodeSimilarity=assertProbability 0.3} [] + , Tree.Node MSTNode{mstNodeRangeCluster=(2,5), mstNodeSimilarity=assertProbability 0.3} [] + ] + ] + ] + ] + goldenBuilder ("mst=0" <.> "split=0") $ + dotMaximalSpanningTrees [mst0] + goldenBuilder ("mst=0" <.> "split=1") $ + dotMaximalSpanningTrees $ + splitMaximalSpanningTree mst0 + goldenBuilder ("mst=0" <.> "split=2") $ + dotMaximalSpanningTrees $ + mst0 + & splitMaximalSpanningTree + >>= splitMaximalSpanningTree + goldenBuilder ("mst=0" <.> "split=3") $ + dotMaximalSpanningTrees $ + mst0 + & splitMaximalSpanningTree + >>= splitMaximalSpanningTree + >>= splitMaximalSpanningTree + -} + letName rangeToDocs0 $ \rangeToDocs -> + letName ["a", "b", "c", "d", "e", "f", "g"] $ \roots -> + forM_ ([1 .. 1] :: [Int]) \minSupp -> + forM_ ([2 .. 2] :: [Int]) \minSize -> do + let clusters = clusterize roots (assertStrictlyPositive minSupp) (assertStrictlyPositive minSize) rangeToDocs + -- let allSimils = allSimilarities similarityJaccard (clusters <&> unName) + let msf = maximalSpanningForest similarityJaccard (clusters <&> unName) + describe "dotMaximalSpanningTrees" do + forM_ ([0, 0.3, 1] :: [Rational]) \lambda -> do + letName (predictionMeasureF (assertProbability lambda)) \predMeasure -> do + goldenBuilder ("docs=docs0" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize <.> "lambda=" <> showFFloat (Just 2) (fromRational @Double lambda) "") $ + dotMaximalSpanningForest $ + msfSplit predMeasure roots msf + +-- describe "dotMaximalSpanningTrees" do +-- ([Map.keysSet similToMST | similToMST <- msf & Map.elems] & Set.unions & toList) +-- & (`foldM_` msf) \acc simil -> do +-- let similS = showFFloat Nothing (fromRational @Double (runProbability simil)) "" +-- let acc' = acc & (`Map.foldrWithKey` Map.empty) \_simil mst -> +-- Map.unionWith (Map.unionWith (Map.unionWith (Seq.><))) $ +-- splitMaximalSpanningTree mst +-- goldenBuilder ("docs=docs0" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize <.> "simil=" <> similS) $ +-- dotMaximalSpanningTrees acc +-- return acc' +-- describe "splitMaximalSpanningTrees" do +-- forM_ ([0] :: [Rational]) \lambda -> do +-- letName (predictionMeasureF (assertProbability lambda)) \predMeasure -> do +-- goldenBuilder ("docs=docs0" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize <.> "lambda=" <> showFFloat (Just 2) (fromRational @Double lambda) "") $ +-- dotMaximalSpanningTrees $ +-- splitMaximalSpanningTrees predMeasure roots msf +-- describe "dotSimilarities" do +-- forM_ ([0] :: [Rational]) \lambda -> do +-- --letName 
(predictionMeasureF (assertProbability lambda)) \predMeasure -> do +-- goldenBuilder ("docs=docs0" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize <.> "lambda=" <> showFFloat (Just 2) (fromRational @Double lambda) "") $ +-- dotSimilarities +-- --(splitMaximalSpanningTrees predMeasure roots msf) +-- msf +-- allSimils +-- describe "similarities" do +-- goldenShow ("docs=docs0" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize) weights +-- describe "phylomemyDOT" do +-- forM_ weights \minWeight -> +-- goldenBuilder ("docs=docs0" <.> "minSupp=" <> show minSupp <.> "minSize=" <> show minSize <.> "minWeight=" <> show minWeight) $ +-- phylomemyDOT $ +-- phylomemyRaise minWeight phy diff --git a/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.00.golden b/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.00.golden new file mode 100644 index 0000000..076a302 --- /dev/null +++ b/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.00.golden @@ -0,0 +1,161 @@ +// fromList [(Pos 1,[1,2,3]),(Pos 2,[4,5,6]),(Pos 3,[7,8]),(Pos 4,[9,10,11,12]),(Pos 5,[13,14,15,16]),(Pos 6,[17,18,19,20,21,22])] +digraph g +{ + splines="ortho" + subgraph cluster_r1 + { + // Create a node for the range r1 + r1[shape=box,label="1",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r1 + r1t1c1[label="a & b & c +T001",style=filled,colorscheme=ylorrd9,shape=box] + r1t2c1[label="a & d & e +T002",style=filled,colorscheme=ylorrd9,shape=box] + r1t3c1[label="e & f & g +T003",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r1 -> r1t1c1[style=invis] + r1t1c1 -> r1t2c1[weight=10,style=invis] + r1t2c1 -> r1t3c1[weight=10,style=invis] + } + } + subgraph cluster_r2 + { + // Create a node for the range r2 + r2[shape=box,label="2",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r2 + r2t4c1[label="a & b +T004",style=filled,colorscheme=ylorrd9,shape=box] + r2t5c1[label="a & d +T005",style=filled,colorscheme=ylorrd9,shape=box] + r2t6c1[label="d & f +T006",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r2 -> r2t4c1[style=invis] + r2t4c1 -> r2t5c1[weight=10,style=invis] + r2t5c1 -> r2t6c1[weight=10,style=invis] + } + } + subgraph cluster_r3 + { + // Create a node for the range r3 + r3[shape=box,label="3",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r3 + r3t7c1[label="a & f & g +T007",style=filled,colorscheme=ylorrd9,shape=box] + r3t8c1[label="d & f +T008",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r3 -> r3t7c1[style=invis] + r3t7c1 -> r3t8c1[weight=10,style=invis] + } + } + subgraph cluster_r4 + { + // Create a node for the range r4 + r4[shape=box,label="4",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r4 + r4t9c1[label="a & b & c +T009",style=filled,colorscheme=ylorrd9,shape=box] + r4t10c1[label="a & d & e +T010",style=filled,colorscheme=ylorrd9,shape=box] + r4t11c1[label="b & c +T011",style=filled,colorscheme=ylorrd9,shape=box] + r4t12c1[label="b & c & e +T012",style=filled,colorscheme=ylorrd9,shape=box] + // 
Horizontally align the cluster nodes within the same range + r4 -> r4t9c1[style=invis] + r4t9c1 -> r4t10c1[weight=10,style=invis] + r4t10c1 -> r4t11c1[weight=10,style=invis] + r4t11c1 -> r4t12c1[weight=10,style=invis] + } + } + subgraph cluster_r5 + { + // Create a node for the range r5 + r5[shape=box,label="5",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r5 + r5t13c1[label="a & c & d +T013",style=filled,colorscheme=ylorrd9,shape=box] + r5t14c1[label="a & f +T014",style=filled,colorscheme=ylorrd9,shape=box] + r5t15c1[label="b & f +T015",style=filled,colorscheme=ylorrd9,shape=box] + r5t16c1[label="d & f & g +T016",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r5 -> r5t13c1[style=invis] + r5t13c1 -> r5t14c1[weight=10,style=invis] + r5t14c1 -> r5t15c1[weight=10,style=invis] + r5t15c1 -> r5t16c1[weight=10,style=invis] + } + } + subgraph cluster_r6 + { + // Create a node for the range r6 + r6[shape=box,label="6",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r6 + r6t17c1[label="a & b & c +T017",style=filled,colorscheme=ylorrd9,shape=box] + r6t18c1[label="b & c +T018",style=filled,colorscheme=ylorrd9,shape=box] + r6t19c1[label="b & c & g +T019",style=filled,colorscheme=ylorrd9,shape=box] + r6t20c1[label="c & d & g +T020",style=filled,colorscheme=ylorrd9,shape=box] + r6t21c1[label="c & g +T021",style=filled,colorscheme=ylorrd9,shape=box] + r6t22c1[label="e & g +T022",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r6 -> r6t17c1[style=invis] + r6t17c1 -> r6t18c1[weight=10,style=invis] + r6t18c1 -> r6t19c1[weight=10,style=invis] + r6t19c1 -> r6t20c1[weight=10,style=invis] + r6t20c1 -> r6t21c1[weight=10,style=invis] + r6t21c1 -> r6t22c1[weight=10,style=invis] + } + } + // Create the edges of the MST 1 + // Create the edges of the MST 2 + // Create the edges of the MST 3 + // Create the edges of the MST 4 + // Create the edges of the MST 5 + // Create the edges of the MST 6 + // Create the edges of the MST 7 + // Create the edges of the MST 8 + // Create the edges of the MST 9 + // Create the edges of the MST 10 + // Create the edges of the MST 11 + // Create the edges of the MST 12 + // Create the edges of the MST 13 + // Create the edges of the MST 14 + // Create the edges of the MST 15 + // Create the edges of the MST 16 + // Create the edges of the MST 17 + // Create the edges of the MST 18 + // Create the edges of the MST 19 + // Create the edges of the MST 20 + // Create the edges of the MST 21 + // Create the edges of the MST 22 + // Vertically align range nodes + r1 -> r2 -> r3 -> r4 -> r5 -> r6[weight=10,style=invis] +} diff --git a/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.30.golden b/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.30.golden new file mode 100644 index 0000000..58fd974 --- /dev/null +++ b/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=0.30.golden @@ -0,0 +1,180 @@ +// fromList [(Pos 1,[1,2,5]),(Pos 2,[1,3,5]),(Pos 3,[3,4]),(Pos 4,[1,5]),(Pos 5,[1,3,4,6]),(Pos 6,[2,5,7,8])] +digraph g +{ + splines="ortho" + subgraph cluster_r1 + { + // Create a node for the range r1 + r1[shape=box,label="1",color=gray,style=filled,fillcolor=gray] + color=gray + { + 
rank=same + // Create the cluster nodes within the range r1 + r1t1c1[label="a & d & e +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r1t2c1[label="e & f & g +T002 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r1t5c1[label="a & b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r1 -> r1t1c1[style=invis] + r1t1c1 -> r1t2c1[weight=10,style=invis] + r1t2c1 -> r1t5c1[weight=10,style=invis] + } + } + subgraph cluster_r2 + { + // Create a node for the range r2 + r2[shape=box,label="2",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r2 + r2t1c1[label="a & d +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r2t3c1[label="d & f +T003 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r2t5c1[label="a & b +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r2 -> r2t1c1[style=invis] + r2t1c1 -> r2t3c1[weight=10,style=invis] + r2t3c1 -> r2t5c1[weight=10,style=invis] + } + } + subgraph cluster_r3 + { + // Create a node for the range r3 + r3[shape=box,label="3",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r3 + r3t3c1[label="d & f +T003 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r3t4c1[label="a & f & g +T004 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r3 -> r3t3c1[style=invis] + r3t3c1 -> r3t4c1[weight=10,style=invis] + } + } + subgraph cluster_r4 + { + // Create a node for the range r4 + r4[shape=box,label="4",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r4 + r4t1c1[label="a & d & e +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r4t5c1[label="a & b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r4t5c2[label="b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r4t5c3[label="b & c & e +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r4 -> r4t1c1[style=invis] + r4t1c1 -> r4t5c1[weight=10,style=invis] + r4t5c1 -> r4t5c2[weight=10,style=invis] + r4t5c2 -> r4t5c3[weight=10,style=invis] + } + } + subgraph cluster_r5 + { + // Create a node for the range r5 + r5[shape=box,label="5",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r5 + r5t1c1[label="a & c & d +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r5t3c1[label="d & f & g +T003 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r5t4c1[label="a & f +T004 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r5t6c1[label="b & f +T006",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r5 -> r5t1c1[style=invis] + r5t1c1 -> r5t3c1[weight=10,style=invis] + r5t3c1 -> r5t4c1[weight=10,style=invis] + r5t4c1 -> r5t6c1[weight=10,style=invis] + } + } + subgraph cluster_r6 + { + // Create a node for the range r6 + r6[shape=box,label="6",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r6 + r6t2c1[label="e & g +T002 
+0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t5c1[label="a & b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t5c2[label="b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t5c3[label="b & c & g +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t7c1[label="c & d & g +T007",style=filled,colorscheme=ylorrd9,shape=box] + r6t8c1[label="c & g +T008",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r6 -> r6t2c1[style=invis] + r6t2c1 -> r6t5c1[weight=10,style=invis] + r6t5c1 -> r6t5c2[weight=10,style=invis] + r6t5c2 -> r6t5c3[weight=10,style=invis] + r6t5c3 -> r6t7c1[weight=10,style=invis] + r6t7c1 -> r6t8c1[weight=10,style=invis] + } + } + // Create the edges of the MST 1 + r1t1c1 -> r5t1c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t1c1 -> r2t1c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t1c1 -> r4t1c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 2 + r1t2c1 -> r6t2c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 3 + r2t3c1 -> r5t3c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r2t3c1 -> r3t3c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 4 + r3t4c1 -> r5t4c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 5 + r4t5c3 -> r1t5c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r6t5c3[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r4t5c2[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r4t5c2 -> r6t5c2[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r2t5c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r6t5c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r4t5c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 6 + // Create the edges of the MST 7 + // Create the edges of the MST 8 + // Vertically align range nodes + r1 -> r2 -> r3 -> r4 -> r5 -> r6[weight=10,style=invis] +} diff --git a/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=1.00.golden b/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=1.00.golden new file mode 100644 index 0000000..58fd974 --- /dev/null +++ b/tests/Phylomemy/TemporalMatchingSpec/dotMaximalSpanningTrees/docs=docs0.minSupp=1.minSize=2.lambda=1.00.golden @@ -0,0 +1,180 @@ +// fromList [(Pos 1,[1,2,5]),(Pos 2,[1,3,5]),(Pos 3,[3,4]),(Pos 4,[1,5]),(Pos 5,[1,3,4,6]),(Pos 6,[2,5,7,8])] +digraph g +{ + splines="ortho" + subgraph cluster_r1 + { + // Create a node for the range r1 + r1[shape=box,label="1",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // 
Create the cluster nodes within the range r1 + r1t1c1[label="a & d & e +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r1t2c1[label="e & f & g +T002 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r1t5c1[label="a & b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r1 -> r1t1c1[style=invis] + r1t1c1 -> r1t2c1[weight=10,style=invis] + r1t2c1 -> r1t5c1[weight=10,style=invis] + } + } + subgraph cluster_r2 + { + // Create a node for the range r2 + r2[shape=box,label="2",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r2 + r2t1c1[label="a & d +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r2t3c1[label="d & f +T003 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r2t5c1[label="a & b +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r2 -> r2t1c1[style=invis] + r2t1c1 -> r2t3c1[weight=10,style=invis] + r2t3c1 -> r2t5c1[weight=10,style=invis] + } + } + subgraph cluster_r3 + { + // Create a node for the range r3 + r3[shape=box,label="3",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r3 + r3t3c1[label="d & f +T003 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r3t4c1[label="a & f & g +T004 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r3 -> r3t3c1[style=invis] + r3t3c1 -> r3t4c1[weight=10,style=invis] + } + } + subgraph cluster_r4 + { + // Create a node for the range r4 + r4[shape=box,label="4",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r4 + r4t1c1[label="a & d & e +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r4t5c1[label="a & b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r4t5c2[label="b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r4t5c3[label="b & c & e +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r4 -> r4t1c1[style=invis] + r4t1c1 -> r4t5c1[weight=10,style=invis] + r4t5c1 -> r4t5c2[weight=10,style=invis] + r4t5c2 -> r4t5c3[weight=10,style=invis] + } + } + subgraph cluster_r5 + { + // Create a node for the range r5 + r5[shape=box,label="5",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r5 + r5t1c1[label="a & c & d +T001 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r5t3c1[label="d & f & g +T003 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r5t4c1[label="a & f +T004 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r5t6c1[label="b & f +T006",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r5 -> r5t1c1[style=invis] + r5t1c1 -> r5t3c1[weight=10,style=invis] + r5t3c1 -> r5t4c1[weight=10,style=invis] + r5t4c1 -> r5t6c1[weight=10,style=invis] + } + } + subgraph cluster_r6 + { + // Create a node for the range r6 + r6[shape=box,label="6",color=gray,style=filled,fillcolor=gray] + color=gray + { + rank=same + // Create the cluster nodes within the range r6 + r6t2c1[label="e & g +T002 
+0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t5c1[label="a & b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t5c2[label="b & c +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t5c3[label="b & c & g +T005 +0.67",style=filled,fillcolor=7,colorscheme=ylorrd9,shape=box] + r6t7c1[label="c & d & g +T007",style=filled,colorscheme=ylorrd9,shape=box] + r6t8c1[label="c & g +T008",style=filled,colorscheme=ylorrd9,shape=box] + // Horizontally align the cluster nodes within the same range + r6 -> r6t2c1[style=invis] + r6t2c1 -> r6t5c1[weight=10,style=invis] + r6t5c1 -> r6t5c2[weight=10,style=invis] + r6t5c2 -> r6t5c3[weight=10,style=invis] + r6t5c3 -> r6t7c1[weight=10,style=invis] + r6t7c1 -> r6t8c1[weight=10,style=invis] + } + } + // Create the edges of the MST 1 + r1t1c1 -> r5t1c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t1c1 -> r2t1c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t1c1 -> r4t1c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 2 + r1t2c1 -> r6t2c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 3 + r2t3c1 -> r5t3c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r2t3c1 -> r3t3c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 4 + r3t4c1 -> r5t4c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 5 + r4t5c3 -> r1t5c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r6t5c3[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r4t5c2[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r4t5c2 -> r6t5c2[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r2t5c1[constraint=false,color=6,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r6t5c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + r1t5c1 -> r4t5c1[constraint=false,color=0,colorscheme=ylorrd9,fontcolor=blue,dir=both,arrowhead=dot,arrowtail=dot] + // Create the edges of the MST 6 + // Create the edges of the MST 7 + // Create the edges of the MST 8 + // Vertically align range nodes + r1 -> r2 -> r3 -> r4 -> r5 -> r6[weight=10,style=invis] +} diff --git a/tests/Spec.hs b/tests/Spec.hs new file mode 100644 index 0000000..b3ff7b9 --- /dev/null +++ b/tests/Spec.hs @@ -0,0 +1,21 @@ +{-# OPTIONS_GHC -w -Wall -fno-warn-missing-signatures -fno-warn-unused-imports #-} + +import Test.Syd +import Prelude qualified + +import Clustering.FrequentItemSet.BruteForceSpec qualified +import Clustering.FrequentItemSet.LCMSpec qualified +import Phylomemy.IndexationSpec qualified +import Phylomemy.SimilaritySpec qualified +import Phylomemy.TemporalMatchingSpec qualified + +main :: Prelude.IO () +main = sydTest spec + +spec = do + -- describe "Clustering.FrequentItemSet.BruteForce" Clustering.FrequentItemSet.BruteForceSpec.spec + --describe "Clustering.FrequentItemSet.LCM" 
Clustering.FrequentItemSet.LCMSpec.spec
+  describe "Phylomemy.TemporalMatchingSpec" Phylomemy.TemporalMatchingSpec.spec
+
+-- describe "Phylomemy.IndexationSpec" Phylomemy.IndexationSpec.spec
+-- describe "Phylomemy.SimilaritySpec" Phylomemy.SimilaritySpec.spec
diff --git a/tests/Utils.hs b/tests/Utils.hs
new file mode 100644
index 0000000..40b85a6
--- /dev/null
+++ b/tests/Utils.hs
@@ -0,0 +1,50 @@
+{-# LANGUAGE UndecidableInstances #-}
+module Utils where
+
+import Data.ByteString.Builder qualified as BS
+import Control.Monad (Monad(..))
+import Data.Either (fromRight)
+import Data.Function ((&), (.))
+import Data.Functor ((<&>))
+import Data.List qualified as List
+import Data.Ord (Ord)
+import Data.String (String)
+import Data.Text qualified as Text
+import Logic
+import Logic.Theory.Arithmetic
+import Logic.Theory.Ord
+import Prelude (undefined)
+import System.FilePath (joinPath, pathSeparator, (<.>), (</>))
+import Test.Syd
+import Text.Show (Show (..))
+import System.FilePath qualified as Sys
+
+assertStrictlyPositive :: Ord a => Zeroable a => a -> () ::: a / () > Zero
+assertStrictlyPositive i = unitName i / fromRight undefined (prove (unitName i > zero))
+
+goldenPath ::
+  Sys.FilePath ->
+  TestDefM outers inner Sys.FilePath
+goldenPath msg = do
+  descrPath <- getTestDescriptionPath
+  let dirPath =
+        List.reverse descrPath
+          <&> Text.unpack . Text.replace (Text.pack ".") (Text.singleton pathSeparator)
+          & joinPath
+  return ("tests" </> dirPath </> msg <.> "golden")
+
+goldenShow :: Show a => String -> a -> TestDefM outers () ()
+goldenShow msg a = do
+  path <- goldenPath msg
+  it msg do
+    goldenPrettyShowInstance path a
+
+goldenBuilder :: String -> BS.Builder -> TestDefM outers () ()
+goldenBuilder msg a = do
+  path <- goldenPath msg
+  it msg do
+    pureGoldenByteStringBuilderFile path a
+
+pattern (:=) :: a -> b -> (a, b)
+pattern (:=) x y = (x, y)
+infixr 0 :=
-- 
2.47.0