2 Module : Gargantext.Core.Methods.Similarities.Accelerate.Distributional
4 Copyright : (c) CNRS, 2017-Present
5 License : AGPL + CECILL v3
6 Maintainer : team@gargantext.org
7 Stability : experimental
11 * Distributional Similarity metric
12 __Definition :__ Distributional metric is a relative metric which depends on the
13 selected list, it represents structural equivalence of mutual information.
15 __Objective :__ We want to compute with matrices processing the similarity between term $i$ and term $j$ :
16 distr(i,j)=$\frac{\Sigma_{k \neq i,j} min(\frac{n_{ik}^2}{n_{ii}n_{kk}},\frac{n_{jk}^2}{n_{jj}n_{kk}})}{\Sigma_{k \neq i}\frac{n_{ik}^2}{ n_{ii}n_{kk}}}$
18 where $n_{ij}$ is the cooccurrence between term $i$ and term $j$
20 * For a vector V=[$x_1$ ... $x_n$], we note $|V|_1=\Sigma_ix_i$
21 * operator : .* and ./ cell by cell multiplication and division of the matrix
22 * operator * is the matrix multiplication
23 * Matrix M=[$n_{ij}$]$_{i,j}$
24 * operator : Diag(M)=[$n_{ii}$]$_i$ (vector)
26 * O=[1]$_{i,j}$ (matrice one)
28 * O * D(M) =[$n_{jj}$]$_{i,j}$
29 * D(M) * O =[$n_{ii}$]$_{i,j}$
30 * $V_i=[0~0~0~1~0~0~0]'$ (1 at index i)
31 * MI=(M ./ (O * D(M))) .* (M ./ (D(M) * O))
32 * distr(i,j)=$\frac{|min(V'_i * (MI-D(MI)),V'_j * (MI-D(MI)))|_1}{|V'_i.(MI-D(MI))|_1}$
34 [Finally, we have used as convention the Distributional metric used in Legacy GarganText](https://gitlab.iscpif.fr/gargantext/haskell-gargantext/issues/50)
36 mi = defaultdict(lambda : defaultdict(int))
37 total_cooc = x.sum().sum()
39 for i in matrix.keys():
40 si = sum([matrix[i][j] for j in matrix[i].keys() if i != j])
41 for j in matrix[i].keys():
42 sj = sum([matrix[j][k] for k in matrix[j].keys() if j != k])
44 mi[i][j] = log( matrix[i][j] / ((si * sj) / total_cooc) )
46 r = defaultdict(lambda : defaultdict(int))
48 for i in matrix.keys():
49 for j in matrix.keys():
52 min(mi[i][k], mi[j][k])
53 for k in matrix.keys()
54 if i != j and k != i and k != j and mi[i][k] > 0
61 for k in matrix.keys()
62 if k != i and k != j and mi[i][k] > 0
67 r[i][j] = sumMin / sumMi
68 except Exception as error:
71 # Need to filter the weak links, automatic threshold here
72 minmax = min([ max([ r[i][j] for i in r.keys()]) for j in r.keys()])
77 (i, j, {'weight': r[i][j]})
78 for i in r.keys() for j in r.keys()
79 if i != j and r[i][j] > minmax and r[i][j] > r[j][i]
84 {-# LANGUAGE TypeFamilies #-}
85 {-# LANGUAGE TypeOperators #-}
86 {-# LANGUAGE ScopedTypeVariables #-}
87 {-# LANGUAGE ViewPatterns #-}
88 {-# LANGUAGE GADTs #-}
90 module Gargantext.Core.Methods.Similarities.Accelerate.Distributional
93 -- import qualified Data.Foldable as P (foldl1)
94 -- import Debug.Trace (trace)
95 import Data.Array.Accelerate as A
96 -- import Data.Array.Accelerate.Interpreter (run)
97 import Data.Array.Accelerate.LLVM.Native (run) -- TODO: try runQ?
98 import Gargantext.Core.Methods.Matrix.Accelerate.Utils
99 import qualified Gargantext.Prelude as P
102 import Prelude (show, mappend{- , String, (<>), fromIntegral, flip -})
104 import qualified Prelude
106 -- | `distributional m` returns the distributional distance between each
107 -- pair of terms as a matrix. The argument m is the matrix $[n_{ij}]_{i,j}$
108 -- where $n_{ij}$ is the cooccurrence between term $i$ and term $j$.
110 -- ## Basic example with Matrix of size 3:
112 -- >>> theMatrixInt 3
113 -- Matrix (Z :. 3 :. 3)
118 -- >>> distributional $ theMatrixInt 3
119 -- Matrix (Z :. 3 :. 3)
120 -- [ 1.0, 0.0, 0.9843749999999999,
124 -- ## Basic example with Matrix of size 4:
126 -- >>> theMatrixInt 4
127 -- Matrix (Z :. 4 :. 4)
133 -- >>> distributional $ theMatrixInt 4
134 -- Matrix (Z :. 4 :. 4)
135 -- [ 1.0, 0.0, 0.5714285714285715, 0.8421052631578947,
136 -- 0.0, 1.0, 1.0, 1.0,
137 -- 8.333333333333333e-2, 4.6875e-2, 1.0, 0.25,
138 -- 0.3333333333333333, 5.7692307692307696e-2, 1.0, 1.0]
140 distributional :: Matrix Int -> Matrix Double
141 distributional m' = run $ result
143 m = map A.fromIntegral $ use m'
148 d_1 = replicate (constant (Z :. n :. All)) diag_m
149 d_2 = replicate (constant (Z :. All :. n)) diag_m
151 mi = (.*) ((./) m d_1) ((./) m d_2)
155 -- The matrix permutations is taken care of below by directly replicating
156 -- the matrix mi, making the matrix w unneccessary and saving one step.
157 w_1 = replicate (constant (Z :. All :. n :. All)) mi
158 w_2 = replicate (constant (Z :. n :. All :. All)) mi
159 w' = zipWith min w_1 w_2
161 -- The matrix ii = [r_{i,j,k}]_{i,j,k} has r_(i,j,k) = 0 if k = i OR k = j
162 -- and r_(i,j,k) = 1 otherwise (i.e. k /= i AND k /= j).
163 ii = generate (constant (Z :. n :. n :. n))
164 (lift1 (\(Z :. i :. j :. k) -> cond ((&&) ((/=) k i) ((/=) k j)) 1 0))
166 z_1 = sum ((.*) w' ii)
167 z_2 = sum ((.*) w_1 ii)
169 result = termDivNan z_1 z_2
171 logDistributional2 :: Exp Double -> Matrix Int -> Matrix Double
172 logDistributional2 o m = trace ("logDistributional2, dim=" `mappend` show n) . run
175 $ logDistributional' o n m
179 logDistributional' :: Exp Double -> Int -> Matrix Int -> Acc (Matrix Double)
180 logDistributional' o n m' = trace ("logDistributional'") result
182 -- From Matrix Int to Matrix Double, i.e :
183 -- m :: Matrix Int -> Matrix Double
184 m = map A.fromIntegral $ use m'
186 -- Scalar. Sum of all elements of m.
187 to = the $ sum (flatten m)
189 -- Diagonal matrix with the diagonal of m.
190 d_m = (.*) m (matrixIdentity n)
192 -- Size n vector. s = [s_i]_i
195 -- Matrix nxn. Vector s replicated as rows.
196 s_1 = replicate (constant (Z :. All :. n)) s
197 -- Matrix nxn. Vector s replicated as columns.
198 s_2 = replicate (constant (Z :. n :. All)) s
200 -- Matrix nxn. ss = [s_i * s_j]_{i,j}. Outer product of s with itself.
203 -- Matrix nxn. mi = [m_{i,j}]_{i,j} where
204 -- m_{i,j} = 0 if n_{i,j} = 0 or i = j,
205 -- m_{i,j} = log(to * n_{i,j} / s_{i,j}) otherwise.
206 mi = (.*) (matrixEye n)
207 (map (lift1 (\x -> cond (x == 0) 0 (log (o + x * to)))) ((./) m ss))
209 -- mi_nnz = flip indexArray Z . run $
210 -- foldAll (+) 0 $ map (\a -> ifThenElse (abs a < 10^(-6 :: Exp Int)) 0 1) mi
214 -- reportMat :: String -> Int -> Int -> String
215 -- reportMat name nnz tot = name <> ": " <> show nnz <> "nnz / " <> show tot <>
216 -- " | " <> show pc <> "%"
217 -- where pc = 100 * Prelude.fromIntegral nnz / Prelude.fromIntegral tot :: Double
219 -- Tensor nxnxn. Matrix mi replicated along the 2nd axis.
220 -- w_1 = trace (reportMat "mi" mi_nnz mi_total) $ replicate (constant (Z :. All :. n :. All)) mi
222 -- w1_nnz = flip indexArray Z . run $
223 -- foldAll (+) 0 $ map (\a -> ifThenElse (abs a < 10^(-6 :: Exp Int)) 0 1) w_1
226 -- Tensor nxnxn. Matrix mi replicated along the 1st axis.
227 -- w_2 = trace (reportMat "w1" w1_nnz w1_total) $ replicate (constant (Z :. n :. All :. All)) mi
230 -- w' = trace "w'" $ zipWith min w_1 w_2
232 -- A predicate that is true when the input (i, j, k) satisfy
234 -- k_diff_i_and_j = lift1 (\(Z :. i :. j :. k) -> ((&&) ((/=) k i) ((/=) k j)))
237 sumMin = trace "sumMin" $ sumMin_go n mi -- sum (condOrDefault k_diff_i_and_j 0 w')
239 -- Matrix nxn. All columns are the same.
240 sumM = trace "sumM" $ sumM_go n mi -- trace "sumM" $ sum (condOrDefault k_diff_i_and_j 0 w_1)
242 result = termDivNan sumMin sumM
245 -- The distributional metric P(c) of @i@ and @j@ terms is: \[
246 -- S_{MI} = \frac {\sum_{k \neq i,j ; MI_{ik} >0}^{} \min(MI_{ik},
247 -- MI_{jk})}{\sum_{k \neq i,j ; MI_{ik}>0}^{} MI_{ik}} \]
249 -- Mutual information
250 -- \[S_{MI}({i},{j}) = \log(\frac{C{ij}}{E{ij}})\]
252 -- Number of cooccurrences of @i@ and @j@ in the same context of text
255 -- The expected value of the cooccurrences @i@ and @j@ (given a map list of size @n@)
256 -- \[E_{ij}^{m} = \frac {S_{i} S_{j}} {N_{m}}\]
258 -- Total cooccurrences of term @i@ given a map list of size @m@
259 -- \[S_{i} = \sum_{j, j \neq i}^{m} S_{ij}\]
261 -- Total cooccurrences of terms given a map list of size @m@
262 -- \[N_{m} = \sum_{i}^{m} \sum_{j, j \neq i}^{m} S_{ij}\]
265 logDistributional :: Matrix Int -> Matrix Double
266 logDistributional m' = run $ diagNull n $ result
268 m = map fromIntegral $ use m'
271 -- Scalar. Sum of all elements of m.
272 to = the $ sum (flatten m)
274 -- Diagonal matrix with the diagonal of m.
275 d_m = (.*) m (matrixIdentity n)
277 -- Size n vector. s = [s_i]_i
280 -- Matrix nxn. Vector s replicated as rows.
281 s_1 = replicate (constant (Z :. All :. n)) s
282 -- Matrix nxn. Vector s replicated as columns.
283 s_2 = replicate (constant (Z :. n :. All)) s
285 -- Matrix nxn. ss = [s_i * s_j]_{i,j}. Outer product of s with itself.
288 -- Matrix nxn. mi = [m_{i,j}]_{i,j} where
289 -- m_{i,j} = 0 if n_{i,j} = 0 or i = j,
290 -- m_{i,j} = log(to * n_{i,j} / s_{i,j}) otherwise.
291 mi = (.*) (matrixEye n)
292 (map (lift1 (\x -> cond (x == 0) 0 (log (x * to)))) ((./) m ss))
294 -- Tensor nxnxn. Matrix mi replicated along the 2nd axis.
295 w_1 = replicate (constant (Z :. All :. n :. All)) mi
297 -- Tensor nxnxn. Matrix mi replicated along the 1st axis.
298 w_2 = replicate (constant (Z :. n :. All :. All)) mi
301 w' = zipWith min w_1 w_2
303 -- A predicate that is true when the input (i, j, k) satisfy
305 k_diff_i_and_j = lift1 (\(Z :. i :. j :. k) -> ((&&) ((/=) k i) ((/=) k j)))
308 sumMin = sum (condOrDefault k_diff_i_and_j 0 w')
310 -- Matrix nxn. All columns are the same.
311 sumM = sum (condOrDefault k_diff_i_and_j 0 w_1)
313 result = termDivNan sumMin sumM
318 distributional'' :: Matrix Int -> Matrix Double
319 distributional'' m = -- run {- $ matMaxMini -}
326 {- from Int to Double -}
328 {- push matrix in Accelerate type -}
331 _ri :: Acc (Matrix Double) -> Acc (Matrix Double)
332 _ri mat = mat1 -- zipWith (/) mat1 mat2
334 mat1 = matSumCol n $ zipWith min (_myMin mat) (_myMin $ filterWith 0 100 $ diagNull n $ transpose mat)
337 _myMin :: Acc (Matrix Double) -> Acc (Matrix Double)
338 _myMin = replicate (constant (Z :. n :. All)) . minimum
343 s_mi :: Acc (Matrix Double) -> Acc (Matrix Double)
344 s_mi m' = zipWith (\x y -> log (x / y)) (diagNull n m')
345 $ zipWith (/) (crossProduct n m') (total m')
349 total :: Acc (Matrix Double) -> Acc (Matrix Double)
350 total = replicate (constant (Z :. n :. n)) . sum . sum
355 rIJ :: (Elt a, Ord a, P.Fractional (Exp a), P.Num a)
356 => Dim -> Acc (Matrix a) -> Acc (Matrix a)
357 rIJ n m = matMaxMini $ divide a b
362 -- * For Tests (to be removed)
363 -- | Test perfermance with this matrix
364 -- TODO : add this in a benchmark folder
366 distriTest :: Int -> Bool
367 distriTest n = logDistributional m == distributional m
374 -- compact repr of "extend along an axis" op?
375 -- general sparse repr ?
377 type Extended sh = sh :. Int
389 type Delayed sh a = Exp sh -> Exp a
391 data ExtArr sh a = ExtArr
392 { extSh :: Extended sh
393 , extFun :: Delayed (Extended sh) a
397 w_1_{i, j, k} = mi_{i, k}
398 w_2_{i, j, k} = mi_{j, k}
400 w'_{i, j, k} = min w_1_{i, j, k} w_2_{i, j, k}
401 = min mi_{i, k} mi_{j, k}
403 w"_{i, j, k} = 0 if i = k or j = k
404 min mi_{i, k} mi_{j, k} otherwise
406 w_1'_{i, j, k} = 0 if i = k or j = k
409 sumMin_{i, j} = sum_k of w"_{i, j, k}
410 = sum_k (k /= i && k /= j) of min mi_{i, k} mi_{j, k}
412 sumM_{i, j} = sum_k of w_1'_{i, j, k}
413 = sum_k (k /= i && k /= j) of mi_{i, k}
417 sumM_go :: (Elt a, Num a) => Int -> Acc (Array DIM2 a) -> Acc (Array DIM2 a)
418 sumM_go n mi = generate (lift (Z :. n :. n)) $ \coord ->
419 let (Z :. i :. j) = unlift coord in
421 [ cond (constant k /= i && constant k /= j)
422 (mi ! lift (constant Z :. i :. constant k))
427 sumMin_go :: (Elt a, Num a, Ord a) => Int -> Acc (Array DIM2 a) -> Acc (Array DIM2 a)
428 sumMin_go n mi = generate (constant (Z :. n :. n)) $ \coord ->
429 let (Z :. i :. j) = unlift coord in
431 [ cond (constant k /= i && constant k /= j)
433 (mi ! lift (constant Z :. i :. constant k))
434 (mi ! lift (constant Z :. j :. constant k))