{-|
Module      : Gargantext.Text
Description : Ngrams tools
Copyright   : (c) CNRS, 2018
License     : AGPL + CECILL v3
Maintainer  : team@gargantext.org
Stability   : experimental
Portability : POSIX

Ngrams extraction.

Definitions of ngrams, where n is a non-negative integer.

-}

{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}

module Gargantext.Text
  where

import qualified Data.Text as DT
--import Data.Text.IO (readFile)


import Data.Map.Strict ( Map
                        , lookupIndex
                        --, fromList, keys
                        )

import Data.Text (Text, split)
import qualified Data.Map.Strict as M (filter)

import NLP.FullStop (segment)
-----------------------------------------------------------------
import Gargantext.Text.Ngrams
import Gargantext.Text.Metrics.Occurrences

import qualified Gargantext.Text.Metrics.FrequentItemSet as FIS
import Gargantext.Prelude hiding (filter)
-----------------------------------------------------------------

-- | A Group ties a label ngram to the list of ngrams it stands for.
data Group = Group { _group_label  :: Ngrams
                   , _group_ngrams :: [Ngrams]
                   } deriving (Show)

-- | Normalise a text: currently, replace the typographic apostrophe
-- with the ASCII one.
clean :: Text -> Text
clean txt = DT.map clean' txt
  where
    clean' '’' = '\''
    clean' c   = c
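
-- Example (illustrative): only the curly apostrophe is rewritten,
-- everything else passes through unchanged.
-- >>> clean (DT.pack "l’eau")
-- "l'eau"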

--noApax :: Ord a => Map a Occ -> Map a Occ
--noApax m = M.filter (>1) m

-- | /!\ the indexes are not the same: 'lookupIndex' (used by 'indexWith')
-- is 0-based, while the commented helpers below index from 1.

-- | Index -> ngram, from an occurrences Map
--indexNgram :: Ord a => Map a Occ -> Map Index a
--indexNgram m = fromList (zip [1..] (keys m))

-- | Ngram -> index, from an occurrences Map
--ngramIndex :: Ord a => Map a Occ -> Map a Index
--ngramIndex m = fromList (zip (keys m) [1..])

-- | Replace each element of a list by its (0-based) position in the Map's
-- key order; elements absent from the Map are dropped.
indexWith :: Ord a => Map a Occ -> [a] -> [Int]
indexWith m xs = unMaybe $ map (\x -> lookupIndex x m) xs
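
-- Example (illustrative; assumes 'unMaybe' behaves like 'catMaybes', so the
-- key 'z', absent from the Map, is silently dropped):
-- >>> import Data.Map.Strict (fromList)
-- >>> indexWith (fromList [('a',1),('b',3)]) ['b','a','z']
-- [1,0]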

-- | Build the occurrences Map of all the elements and re-express each input
-- list as the list of its elements' indexes in that Map.
indexIt :: Ord a => [[a]] -> (Map a Int, [[Int]])
indexIt xs = (m, is)
  where
    m  = sumOcc (map occ xs)
    is = map (indexWith m) xs
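
-- Example (illustrative; assumes 'occ' counts occurrences within one list
-- and 'sumOcc' merges those counts):
-- >>> indexIt [['a','b'],['b','c']]
-- (fromList [('a',1),('b',2),('c',1)],[[0,1],[1,2]])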

-- | Index the input lists and mine their Frequent Item Sets (FIS).
-- /!\ the (>50000) filter is a hard-coded threshold on occurrence counts.
list2fis :: Ord a => FIS.Frequency -> [[a]] -> (Map a Int, [FIS.Fis])
list2fis n xs = (m', fs)
  where
    (m, is) = indexIt xs
    m'      = M.filter (>50000) m
    fs      = FIS.all n is

-- | Frequent Item Sets over the ngrams of each text.
text2fis :: FIS.Frequency -> [Text] -> (Map Text Int, [FIS.Fis])
text2fis n xs = list2fis n (map ngrams xs)
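
-- Usage sketch (illustrative; assumes 'FIS.Frequency' is numeric): mine the
-- itemsets occurring in at least 2 sentences of the English test text.
-- >>> let (occs, itemsets) = text2fis 2 (sentences testText_en)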

--text2fisWith :: FIS.Size -> FIS.Frequency -> [Text] -> (Map Text Int, [FIS.Fis])
--text2fisWith = undefined

-------------------------------------------------------------------
-- Contexts of text
-- | Sentence segmentation, using the fullstop sentence segmenter.
sentences :: Text -> [Text]
sentences txt = map DT.pack $ segment $ DT.unpack txt

-- | Naive sentence segmentation: split on sentence-final punctuation.
sentences' :: Text -> [Text]
sentences' txt = split isStop txt

isStop :: Char -> Bool
isStop c = c `elem` ['.','?','!']
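
-- Example (illustrative): unlike 'sentences', the naive splitter drops the
-- punctuation itself and keeps a trailing empty chunk.
-- >>> sentences' (DT.pack "Hello. World.")
-- ["Hello"," World",""]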

unsentences :: [Text] -> Text
unsentences txts = DT.intercalate " " txts

-- | https://en.wikipedia.org/wiki/Text_mining
testText_en :: Text
testText_en = DT.pack "Text mining, also referred to as text data mining, roughly equivalent to text analytics, is the process of deriving high-quality information from text. High-quality information is typically derived through the devising of patterns and trends through means such as statistical pattern learning. Text mining usually involves the process of structuring the input text (usually parsing, along with the addition of some derived linguistic features and the removal of others, and subsequent insertion into a database), deriving patterns within the structured data, and finally evaluation and interpretation of the output. 'High quality' in text mining usually refers to some combination of relevance, novelty, and interestingness. Typical text mining tasks include text categorization, text clustering, concept/entity extraction, production of granular taxonomies, sentiment analysis, document summarization, and entity relation modeling (i.e., learning relations between named entities). Text analysis involves information retrieval, lexical analysis to study word frequency distributions, pattern recognition, tagging/annotation, information extraction, data mining techniques including link and association analysis, visualization, and predictive analytics. The overarching goal is, essentially, to turn text into data for analysis, via application of natural language processing (NLP) and analytical methods. A typical application is to scan a set of documents written in a natural language and either model the document set for predictive classification purposes or populate a database or search index with the information extracted."

-- | https://fr.wikipedia.org/wiki/Fouille_de_textes
testText_fr :: Text
testText_fr = DT.pack "La fouille de textes ou « l'extraction de connaissances » dans les textes est une spécialisation de la fouille de données et fait partie du domaine de l'intelligence artificielle. Cette technique est souvent désignée sous l'anglicisme text mining. Elle désigne un ensemble de traitements informatiques consistant à extraire des connaissances selon un critère de nouveauté ou de similarité dans des textes produits par des humains pour des humains. Dans la pratique, cela revient à mettre en algorithme un modèle simplifié des théories linguistiques dans des systèmes informatiques d'apprentissage et de statistiques. Les disciplines impliquées sont donc la linguistique calculatoire, l'ingénierie des langues, l'apprentissage artificiel, les statistiques et l'informatique."

-- | Ngrams test
-- >>> ngramsTest testText_en
-- 248
ngramsTest :: Text -> Int
ngramsTest x = length ws
  where
    --txt = concat <$> lines <$> clean <$> readFile filePath
    txt = clean x
    -- Number of sentences
    --ls = sentences txt
    -- Number of monograms used in the full text
    ws = ngrams txt
    -- TODO: stem ngrams
    -- TODO: group ngrams
    --ocs = occ ws