-- NOTE(review): removed HTML residue left over from the git web viewer.
1 {-|
2 Module : Gargantext.Text
3 Description : Ngrams tools
4 Copyright : (c) CNRS, 2018
5 License : AGPL + CECILL v3
6 Maintainer : team@gargantext.org
7 Stability : experimental
8 Portability : POSIX
9
Text gathers terms into units of context.
11
12 -}
13
14 {-# LANGUAGE NoImplicitPrelude #-}
15 {-# LANGUAGE OverloadedStrings #-}
16 {-# LANGUAGE MultiParamTypeClasses #-}
17
18 module Gargantext.Text
19 where
20
21 import Data.Text (Text, split)
22 import Gargantext.Prelude hiding (filter)
23 import NLP.FullStop (segment)
24 import qualified Data.Text as DT
25
-----------------------------------------------------------------
-- | The level (\"niveau\") of a piece of text, from a whole text down to
-- a single letter.  A closed sum type (rather than a type class) keeps
-- the classification exhaustive: pattern matches over 'Niveau' can be
-- checked by the compiler.
data Niveau = NiveauTexte Texte
            | NiveauParagraphe Paragraphe
            | NiveauPhrase Phrase
            | NiveauMultiTerme MultiTerme
            | NiveauMot Mot
            | NiveauLettre Lettre
            deriving (Show)
35
-- | One newtype per textual level: zero runtime cost, but the compiler
-- prevents accidentally mixing levels (e.g. passing a 'Mot' where a
-- 'Phrase' is expected).
newtype Texte      = Texte Text
newtype Paragraphe = Paragraphe Text
newtype Phrase     = Phrase Text
newtype MultiTerme = MultiTerme Text
newtype Mot        = Mot Text
newtype Lettre     = Lettre Text
43
-- | A title is just a phrase; a plain synonym is enough here since no
-- extra type safety is needed.
type Titre = Phrase
46 -----------------------------------------------------------------
47
-- | Every level is shown as its wrapped 'Text', delegating to the
-- underlying 'Show' 'Text' instance (so output is quoted).
instance Show Texte where
  show (Texte txt) = show txt

instance Show Paragraphe where
  show (Paragraphe par) = show par

instance Show Phrase where
  show (Phrase phr) = show phr

instance Show MultiTerme where
  show (MultiTerme terme) = show terme

instance Show Mot where
  show (Mot mot) = show mot

instance Show Lettre where
  show (Lettre lettre) = show lettre
65
66 -----------------------------------------------------------------
67
-- | Gluing between two adjacent levels of text: a @sup@ level
-- decomposes ('dec') into a list of @inf@ levels, and a list of @inf@
-- levels recomposes ('inc') into a @sup@.
-- NOTE(review): instances appear intended to satisfy @dec . inc ≈ id@
-- up to whitespace/punctuation normalisation — confirm per instance.
class Collage sup inf where
  dec :: sup -> [inf]
  inc :: [inf] -> sup
71
-- | A text splits into paragraphs on newlines, and paragraphs are
-- glued back together with newlines.
instance Collage Texte Paragraphe where
  dec (Texte t) = Paragraphe <$> DT.splitOn "\n" t
  inc ps        = Texte (DT.intercalate "\n" [t | Paragraphe t <- ps])
75
-- | A paragraph decomposes into sentences via 'sentences'; gluing back
-- interleaves single spaces (sentence punctuation is not restored).
instance Collage Paragraphe Phrase where
  dec (Paragraphe t) = Phrase <$> sentences t
  inc phs            = Paragraphe (DT.unwords [p | Phrase p <- phs])
79
-- | A phrase decomposes into whitespace-separated multi-terms; gluing
-- back joins them with single spaces.
instance Collage Phrase MultiTerme where
  dec (Phrase t) = MultiTerme <$> DT.words t
  inc mts        = Phrase (DT.unwords [mt | MultiTerme mt <- mts])
83
-- | A multi-term decomposes into its words; gluing back joins with
-- single spaces.  'DT.unwords' replaces @DT.intercalate " "@ — they
-- are equivalent, but 'unwords' is the idiomatic form (hlint).
instance Collage MultiTerme Mot where
  dec (MultiTerme mt) = map Mot $ DT.words mt
  inc = MultiTerme . DT.unwords . map (\(Mot m) -> m)
87
-- | Decompose any level into its list of multi-terms.
-- We could use type classes here, but we would lose the closed
-- sum-type classification of 'Niveau'.
--
-- Previously the Texte, Paragraphe, Mot and Lettre cases were
-- 'undefined'; they are now implemented via the 'Collage' instances
-- (a lone 'Mot' is treated as a degenerate single-word 'MultiTerme').
-- A 'Lettre' has no multi-term decomposition, so that case raises an
-- explicit, descriptive error instead of a bare 'undefined'.
toMultiTerme :: Niveau -> [MultiTerme]
toMultiTerme (NiveauTexte t) =
  -- whole text → paragraphs → phrases → multi-terms
  concatMap (dec :: Phrase -> [MultiTerme])
    $ concatMap (dec :: Paragraphe -> [Phrase])
    $ (dec :: Texte -> [Paragraphe]) t
toMultiTerme (NiveauParagraphe p) =
  concatMap (dec :: Phrase -> [MultiTerme]) (dec p :: [Phrase])
toMultiTerme (NiveauPhrase p)      = dec p
toMultiTerme (NiveauMultiTerme mt) = [mt]
toMultiTerme (NiveauMot (Mot m))   = [MultiTerme m]
toMultiTerme (NiveauLettre _) =
  error "Gargantext.Text.toMultiTerme: no multi-terms below the word level"
95
96 -------------------------------------------------------------------
97 -- Contexts of text
-- | Segment a text into sentences using the FullStop segmenter
-- ('NLP.FullStop.segment'), round-tripping through 'String'.
sentences :: Text -> [Text]
sentences = fmap DT.pack . segment . DT.unpack
100
-- | Alternative, purely character-based sentence splitter: break on
-- every sentence-terminating character.  Note the terminators are
-- dropped and a trailing terminator yields a trailing empty segment.
sentences' :: Text -> [Text]
sentences' = split isCharStop

-- | Is this character a sentence terminator?
isCharStop :: Char -> Bool
isCharStop c = c == '.' || c == '?' || c == '!'
106
-- | Join sentences back into one text, separated by single spaces.
-- 'DT.unwords' is defined as @intercalate " "@, so behaviour is
-- unchanged — this is the idiomatic spelling (hlint).
unsentences :: [Text] -> Text
unsentences = DT.unwords
109
-- | English sample text for the (commented-out) ngrams tests.
-- Source: https://en.wikipedia.org/wiki/Text_mining
testText_en :: Text
testText_en = DT.pack "Text mining, also referred to as text data mining, roughly equivalent to text analytics, is the process of deriving high-quality information from text. High-quality information is typically derived through the devising of patterns and trends through means such as statistical pattern learning. Text mining usually involves the process of structuring the input text (usually parsing, along with the addition of some derived linguistic features and the removal of others, and subsequent insertion into a database), deriving patterns within the structured data, and finally evaluation and interpretation of the output. 'High quality' in text mining usually refers to some combination of relevance, novelty, and interestingness. Typical text mining tasks include text categorization, text clustering, concept/entity extraction, production of granular taxonomies, sentiment analysis, document summarization, and entity relation modeling (i.e., learning relations between named entities). Text analysis involves information retrieval, lexical analysis to study word frequency distributions, pattern recognition, tagging/annotation, information extraction, data mining techniques including link and association analysis, visualization, and predictive analytics. The overarching goal is, essentially, to turn text into data for analysis, via application of natural language processing (NLP) and analytical methods. A typical application is to scan a set of documents written in a natural language and either model the document set for predictive classification purposes or populate a database or search index with the information extracted."
113
114
-- | Second English sample: an abstract about citation-context
-- analysis, used as test input.
testText_en_2 :: Text
testText_en_2 = DT.pack "It is hard to detect important articles in a specific context. Information retrieval techniques based on full text search can be inaccurate to identify main topics and they are not able to provide an indication about the importance of the article. Generating a citation network is a good way to find most popular articles but this approach is not context aware. The text around a citation mark is generally a good summary of the referred article. So citation context analysis presents an opportunity to use the wisdom of crowd for detecting important articles in a context sensitive way. In this work, we analyze citation contexts to rank articles properly for a given topic. The model proposed uses citation contexts in order to create a directed and edge-labeled citation network based on the target topic. Then we apply common ranking algorithms in order to find important articles in this newly created network. We showed that this method successfully detects a good subset of most prominent articles in a given topic. The biggest contribution of this approach is that we are able to identify important articles for a given search term even though these articles do not contain this search term. This technique can be used in other linked documents including web pages, legal documents, and patents as well as scientific papers."
117
118
-- | French sample text for the ngrams tests.
-- Source: https://fr.wikipedia.org/wiki/Fouille_de_textes
testText_fr :: Text
testText_fr = DT.pack "La fouille de textes ou « l'extraction de connaissances » dans les textes est une spécialisation de la fouille de données et fait partie du domaine de l'intelligence artificielle. Cette technique est souvent désignée sous l'anglicisme text mining. Elle désigne un ensemble de traitements informatiques consistant à extraire des connaissances selon un critère de nouveauté ou de similarité dans des textes produits par des humains pour des humains. Dans la pratique, cela revient à mettre en algorithme un modèle simplifié des théories linguistiques dans des systèmes informatiques d'apprentissage et de statistiques. Les disciplines impliquées sont donc la linguistique calculatoire, l'ingénierie des langues, l'apprentissage artificiel, les statistiques et l'informatique."
122
-- | Sample input for term-extraction tests.
-- NOTE(review): this literal is a byte-for-byte duplicate of
-- 'testText_en_2' (written via OverloadedStrings instead of
-- 'DT.pack') — consider defining it as @termTests = testText_en_2@.
termTests :: Text
termTests = "It is hard to detect important articles in a specific context. Information retrieval techniques based on full text search can be inaccurate to identify main topics and they are not able to provide an indication about the importance of the article. Generating a citation network is a good way to find most popular articles but this approach is not context aware. The text around a citation mark is generally a good summary of the referred article. So citation context analysis presents an opportunity to use the wisdom of crowd for detecting important articles in a context sensitive way. In this work, we analyze citation contexts to rank articles properly for a given topic. The model proposed uses citation contexts in order to create a directed and edge-labeled citation network based on the target topic. Then we apply common ranking algorithms in order to find important articles in this newly created network. We showed that this method successfully detects a good subset of most prominent articles in a given topic. The biggest contribution of this approach is that we are able to identify important articles for a given search term even though these articles do not contain this search term. This technique can be used in other linked documents including web pages, legal documents, and patents as well as scientific papers."
125
126
-- | Ngrams test (code below is kept commented out for reference;
-- 'testText' does not exist — presumably 'testText_en' was meant).
-- >>> ngramsTest testText_en
-- 248
130 --ngramsTest :: Text -> Int
131 --ngramsTest x = length ws
132 -- where
133 -- --txt = concat <$> lines <$> clean <$> readFile filePath
134 -- txt = clean x
135 -- -- | Number of sentences
136 -- --ls = sentences $ txt
137 -- -- | Number of monograms used in the full text
138 -- ws = ngrams $ txt
139 -- -- | stem ngrams
140 -- TODO
141 -- group ngrams
142 --ocs = occ $ ws
143