2 Module : Gargantext.Core.NodeStory
3 Description : Node API generation
4 Copyright : (c) CNRS, 2017-Present
5 License : AGPL + CECILL v3
6 Maintainer : team@gargantext.org
7 Stability : experimental
10 A Node Story is a Map between NodeId and an Archive (with state,
11 version and history) for that node.
13 A couple of words on how this is implemented.
15 First version used files which stored Archive for each NodeId in a
18 For performance reasons, it is rewritten to use the DB.
20 The table `node_stories` contains two columns: `node_id` and
23 Next, it was observed that `a_history` in `Archive` takes much
24 space. So a new table was created, `node_story_archive_history` with
25 columns: `node_id`, `ngrams_type_id`, `patch`. This is because each
26 history item is in fact a map from `NgramsType` to `NgramsTablePatch`
27 (see the `NgramsStatePatch'` type).
29 Moreover, since in ~G.A.Ngrams.commitStatePatch~ we use current state
30 only, with only recent history items, I concluded that it is not
31 necessary to load whole history into memory. Instead, it is kept in DB
32 (history is immutable) and only recent changes are added to
33 `a_history`. Then that record is cleared whenever `Archive` is saved.
43 {-# OPTIONS_GHC -fno-warn-orphans #-}
44 {-# LANGUAGE Arrows #-}
45 {-# LANGUAGE ConstraintKinds #-}
46 {-# LANGUAGE QuasiQuotes #-}
47 {-# LANGUAGE TemplateHaskell #-}
49 module Gargantext.Core.NodeStory
57 , HasNodeStoryImmediateSaver
58 , hasNodeStoryImmediateSaver
62 , initNodeListStoryMock
70 , getNodeArchiveHistory
82 , runPGSAdvisoryUnlock
83 , runPGSAdvisoryXactLock
92 -- import Debug.Trace (traceShow)
93 import Control.Debounce (mkDebounce, defaultDebounceSettings, debounceFreq, debounceAction)
94 import Codec.Serialise.Class
95 import Control.Concurrent (MVar(), newMVar, modifyMVar_)
96 import Control.Exception (catch, throw, SomeException(..))
97 import Control.Lens (makeLenses, Getter, (^.), (.~), (%~), _Just, at, traverse, view)
98 import Control.Monad.Except
99 import Control.Monad.Reader
100 import Data.Aeson hiding ((.=), decode)
101 import Data.ByteString.Char8 (hPutStrLn)
102 import Data.Map.Strict (Map)
103 import Data.Maybe (catMaybes)
105 import Data.Pool (Pool, withResource)
106 import Data.Semigroup
107 import Database.PostgreSQL.Simple.SqlQQ (sql)
108 import Database.PostgreSQL.Simple.FromField (FromField(fromField), fromJSONField)
109 import Data.Profunctor.Product.TH (makeAdaptorAndInstance)
110 import GHC.Generics (Generic)
111 import Gargantext.API.Ngrams.Types
112 import Gargantext.Core.Types (ListId, NodeId(..), NodeType)
113 import Gargantext.Core.Utils.Prefix (unPrefix)
114 import Gargantext.Database.Admin.Config (nodeTypeId)
115 import Gargantext.Database.Prelude (CmdM', HasConnectionPool(..), HasConfig)
116 import Gargantext.Database.Query.Table.Node.Error (HasNodeError())
117 import Gargantext.Prelude
118 import Opaleye (DefaultFromField(..), SqlJsonb, fromPGSFromField)
119 import System.IO (stderr)
120 import qualified Data.Map.Strict as Map
121 import qualified Data.Map.Strict.Patch as PM
122 import qualified Data.Set as Set
123 import qualified Data.Text as Text
124 import qualified Database.PostgreSQL.Simple as PGS
125 import qualified Gargantext.Database.Query.Table.Ngrams as TableNgrams
127 ------------------------------------------------------------------------
-- | Runtime environment for node stories: the shared in-memory state
-- plus the actions used to persist it to the DB.
128 data NodeStoryEnv = NodeStoryEnv
-- Shared mutable map of NodeId -> Archive (see 'NodeListStory').
129 { _nse_var :: !(MVar NodeListStory)
-- Debounced save action (built by 'mkNodeStorySaver').
130 , _nse_saver :: !(IO ())
-- Save action that writes immediately, bypassing the debounce.
131 , _nse_saver_immediate :: !(IO ())
-- Refresh the shared MVar with stories for the given nodes (see 'nodeStoryVar').
132 , _nse_getter :: [NodeId] -> IO (MVar NodeListStory)
133 --, _nse_cleaner :: !(IO ()) -- every 12 hours: cleans the repos of unused NodeStories
134 -- , _nse_lock :: !FileLock -- TODO (it depends on the option: if with database or file only)
-- | Constraint bundle required by monads that read/write node stories.
-- NOTE(review): some constraints appear elided from this view — confirm
-- against the full source.
138 type HasNodeStory env err m = ( CmdM' env err m
141 , HasNodeStoryEnv env
143 , HasConnectionPool env
-- | Access to the whole 'NodeStoryEnv' from an application environment.
147 class (HasNodeStoryVar env, HasNodeStorySaver env)
148 => HasNodeStoryEnv env where
149 hasNodeStory :: Getter env NodeStoryEnv
-- | Access to the per-node state getter ('_nse_getter').
151 class HasNodeStoryVar env where
152 hasNodeStoryVar :: Getter env ([NodeId] -> IO (MVar NodeListStory))
-- | Access to the debounced saver ('_nse_saver').
154 class HasNodeStorySaver env where
155 hasNodeStorySaver :: Getter env (IO ())
-- | Access to the immediate (non-debounced) saver ('_nse_saver_immediate').
157 class HasNodeStoryImmediateSaver env where
158 hasNodeStoryImmediateSaver :: Getter env (IO ())
160 ------------------------------------------------------------------------
162 {- | Node Story for each NodeType where the Key of the Map is NodeId
163 TODO : generalize for any NodeType, let's start with NodeList which
164 is implemented already
-- Wrapper over a map from node id to its 'Archive' (state + patches).
166 newtype NodeStory s p = NodeStory { _unNodeStory :: Map NodeId (Archive s p) }
167 deriving (Generic, Show)
-- JSON and binary (CBOR via Serialise) codecs, derived generically.
169 instance (FromJSON s, FromJSON p) => FromJSON (NodeStory s p)
170 instance (ToJSON s, ToJSON p) => ToJSON (NodeStory s p)
171 instance (Serialise s, Serialise p) => Serialise (NodeStory s p)
-- | Versioned state plus the recent (not yet persisted) history patches.
-- NOTE(review): the '_a_state' and '_a_history' field declarations are
-- elided from this view — confirm field strictness against the full source.
173 data Archive s p = Archive
174 { _a_version :: !Version
177 -- first patch in the list is the most recent
178 -- We use `take` in `commitStatePatch`, that's why.
180 -- History is immutable, we just insert things on top of existing
183 -- We don't need to store the whole history in memory, this
184 -- structure holds only recent history, the one that will be
185 -- inserted to the DB.
187 deriving (Generic, Show)
-- Binary codec, derived generically.
189 instance (Serialise s, Serialise p) => Serialise (Archive s p)
-- | The concrete story type used by the application: ngrams state/patches.
192 type NodeListStory = NodeStory NgramsState' NgramsStatePatch'
-- State: per ngrams-type table of ngrams. Patch: per ngrams-type table patch.
194 type NgramsState' = Map TableNgrams.NgramsType NgramsTableMap
195 type NgramsStatePatch' = PatchMap TableNgrams.NgramsType NgramsTablePatch
196 instance Serialise NgramsStatePatch'
-- Decode an Archive stored as JSON(B) in PostgreSQL.
197 instance FromField (Archive NgramsState' NgramsStatePatch')
199 fromField = fromJSONField
-- Opaleye bridge: reuse the postgresql-simple FromField instance above.
200 instance DefaultFromField SqlJsonb (Archive NgramsState' NgramsStatePatch')
202 defaultFromField = fromPGSFromField
204 -- | Combine `NgramsState'`. This is because the structure is (Map
205 -- NgramsType (Map ...)) and the default `(<>)` operator is
207 -- (https://hackage.haskell.org/package/containers-0.6.6/docs/Data-Map-Internal.html#v:union)
-- Deep union: inner maps are merged with their own (<>) instead of the
-- left-biased shallow union that Map's default (<>) performs.
208 combineState :: NgramsState' -> NgramsState' -> NgramsState'
209 combineState = Map.unionWith (<>)
-- | (<>) keeps the right-hand version and appends histories (right-hand
-- history first). NOTE(review): the state field handling is elided from
-- this view; see 'combineState' for why plain (<>) does not merge states
-- deeply — confirm against the full source.
211 instance (Semigroup s, Semigroup p) => Semigroup (Archive s p) where
212 (<>) (Archive { _a_history = p }) (Archive { _a_version = v'
214 , _a_history = p' }) =
215 Archive { _a_version = v'
217 , _a_history = p' <> p }
-- | mempty: version 0 with empty state/history (remaining fields elided here).
218 instance (Monoid s, Semigroup p) => Monoid (Archive s p) where
219 mempty = Archive { _a_version = 0
-- JSON codecs strip the "_a_" field prefix (e.g. "_a_version" -> "version").
222 instance (FromJSON s, FromJSON p) => FromJSON (Archive s p) where
223 parseJSON = genericParseJSON $ unPrefix "_a_"
224 instance (ToJSON s, ToJSON p) => ToJSON (Archive s p) where
225 toJSON = genericToJSON $ unPrefix "_a_"
226 toEncoding = genericToEncoding $ unPrefix "_a_"
228 ------------------------------------------------------------------------
-- | A fresh story holding a single node with an empty archive.
229 initNodeStory :: (Monoid s, Semigroup p) => NodeId -> NodeStory s p
230 initNodeStory ni = NodeStory $ Map.singleton ni initArchive
-- | Empty archive (body elided from this view — presumably 'mempty'; confirm).
232 initArchive :: (Monoid s, Semigroup p) => Archive s p
-- | Mock story for tests: one list node populated from 'mockTable'.
-- NOTE(review): 'nodeListId' and 'mockTable' definitions are elided here.
235 initNodeListStoryMock :: NodeListStory
236 initNodeListStoryMock = NodeStory $ Map.singleton nodeListId archive
239 archive = Archive { _a_version = 0
240 , _a_state = ngramsTableMap
-- Build the NgramsTerms table from the mock NgramsTable elements.
242 ngramsTableMap = Map.singleton TableNgrams.NgramsTerms
244 [ (n ^. ne_ngrams, ngramsElementToRepo n)
245 | n <- mockTable ^. _NgramsTable
248 ------------------------------------------------------------------------
251 ------------------------------------------------------------------------
252 -- | Lenses at the bottom of the file because Template Haskell would reorder order of execution in others cases
-- Generates nse_* and unNodeStory lenses used throughout this module.
253 makeLenses ''NodeStoryEnv
254 makeLenses ''NodeStory
257 ----------------------------------------------------------------------
-- | Polymorphic row type mirroring the `node_stories` table
-- (NOTE(review): the `version` field appears elided from this view).
258 data NodeStoryPoly nid v ngtid ngid nre =
259 NodeStoryDB { node_id :: nid
261 , ngrams_type_id :: ngtid
263 , ngrams_repo_element :: nre }
-- | Polymorphic row type mirroring `node_story_archive_history`
-- (archive field elided from this view).
266 data NodeStoryArchivePoly nid a =
267 NodeStoryArchiveDB { a_node_id :: nid
-- Opaleye product-profunctor adaptors for the two row types above.
271 $(makeAdaptorAndInstance "pNodeStory" ''NodeStoryPoly)
272 $(makeAdaptorAndInstance "pNodeArchiveStory" ''NodeStoryArchivePoly)
274 -- type NodeStoryWrite = NodeStoryPoly (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlJsonb)
275 -- type NodeStoryRead = NodeStoryPoly (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlInt4) (Column SqlJsonb)
277 -- type NodeStoryArchiveWrite = NodeStoryArchivePoly (Column SqlInt4) (Column SqlJsonb)
278 -- type NodeStoryArchiveRead = NodeStoryArchivePoly (Column SqlInt4) (Column SqlJsonb)
-- | The concrete archive type stored per list node.
280 type ArchiveList = Archive NgramsState' NgramsStatePatch'
-- | Run a statement, returning the affected row count; on any exception,
-- log via 'panic' and rethrow.
-- NOTE(review): 'panic' most likely already throws, which would make the
-- subsequent 'throw' unreachable — confirm. Catching SomeException is
-- deliberately broad here.
284 runPGSExecute :: (PGS.ToRow q) => PGS.Connection -> PGS.Query -> q -> IO Int64
285 runPGSExecute c qs a = catch (PGS.execute c qs a) printError
287 printError (SomeException e) = do
288 --q' <- PGS.formatQuery c qs a
289 _ <- panic $ Text.pack $ show e
290 throw (SomeException e)
-- | Like 'runPGSExecute' but for multi-row statements (PGS.executeMany).
-- Same panic-and-rethrow error handling as 'runPGSExecute'.
292 runPGSExecuteMany :: (PGS.ToRow q) => PGS.Connection -> PGS.Query -> [q] -> IO Int64
293 runPGSExecuteMany c qs a = catch (PGS.executeMany c qs a) printError
295 printError (SomeException e) = do
296 --q' <- PGS.formatQuery c qs a
297 _ <- panic $ Text.pack $ show e
298 throw (SomeException e)
-- | Run a query and return its rows; on exception, format the query for
-- diagnostics and rethrow (logging lines appear elided from this view).
300 runPGSQuery :: (PGS.FromRow r, PGS.ToRow q) => PGS.Connection -> PGS.Query -> q -> IO [r]
301 runPGSQuery c q a = catch (PGS.query c q a) printError
303 printError (SomeException e) = do
304 q' <- PGS.formatQuery c q a
306 throw (SomeException e)
-- | Take a session-level advisory lock on the given key (blocks until held).
-- Must be paired with 'runPGSAdvisoryUnlock' on the same connection.
308 runPGSAdvisoryLock :: PGS.Connection -> Int -> IO ()
309 runPGSAdvisoryLock c id = do
310 _ <- runPGSQuery c [sql| SELECT pg_advisory_lock(?) |] (PGS.Only id) :: IO [PGS.Only ()]
-- | Release a session-level advisory lock taken by 'runPGSAdvisoryLock'.
-- The returned Bool (whether the lock was actually held) is discarded.
313 runPGSAdvisoryUnlock :: PGS.Connection -> Int -> IO ()
314 runPGSAdvisoryUnlock c id = do
315 _ <- runPGSQuery c [sql| SELECT pg_advisory_unlock(?) |] (PGS.Only id) :: IO [PGS.Only Bool]
-- | Take a transaction-scoped advisory lock; PostgreSQL releases it
-- automatically at commit/rollback, so there is no unlock counterpart.
318 runPGSAdvisoryXactLock :: PGS.Connection -> Int -> IO ()
319 runPGSAdvisoryXactLock c id = do
320 _ <- runPGSQuery c [sql| SELECT pg_advisory_xact_lock(?) |] (PGS.Only id) :: IO [PGS.Only ()]
-- | True iff a row with this id exists in `nodes`.
-- Relies on the result being exactly [Only True] when found, [] otherwise.
323 nodeExists :: PGS.Connection -> NodeId -> IO Bool
324 nodeExists c nId = (== [PGS.Only True])
325 <$> runPGSQuery c [sql| SELECT true FROM nodes WHERE id = ? LIMIT 1 |] (PGS.Only nId)
-- | All node ids whose `typename` matches the given 'NodeType'
-- (mapped through 'nodeTypeId').
327 getNodesIdWithType :: PGS.Connection -> NodeType -> IO [NodeId]
328 getNodesIdWithType c nt = do
329 ns <- runPGSQuery c query (PGS.Only $ nodeTypeId nt)
330 pure $ map (\(PGS.Only nId) -> NodeId nId) ns
333 query = [sql| SELECT id FROM nodes WHERE typename = ? |]
337 -- nodeStoryTable :: Table NodeStoryRead NodeStoryWrite
339 -- Table "node_stories"
340 -- ( pNodeStory NodeStoryDB { node_id = tableField "node_id"
341 -- , version = tableField "version"
342 -- , ngrams_type_id = tableField "ngrams_type_id"
343 -- , ngrams_id = tableField "ngrams_id"
344 -- , ngrams_repo_element = tableField "ngrams_repo_element"
347 -- nodeStoryArchiveTable :: Table NodeStoryArchiveRead NodeStoryArchiveWrite
348 -- nodeStoryArchiveTable =
349 -- Table "node_story_archive_history"
350 -- ( pNodeArchiveStory NodeStoryArchiveDB { a_node_id = tableField "node_id"
351 -- , archive = tableField "archive" } )
353 -- nodeStorySelect :: Select NodeStoryRead
354 -- nodeStorySelect = selectTable nodeStoryTable
356 -- NOTE "first patch in the _a_history list is the most recent"
-- | Load the full (immutable) history for a node from
-- `node_story_archive_history`, one singleton PatchMap per row, ordered
-- most-recent-first (DESC on version and row id).
-- NOTE(review): the WHERE node_id = ? clause appears elided from this view
-- even though nodeId is passed as the parameter — confirm.
357 getNodeArchiveHistory :: PGS.Connection -> NodeId -> IO [NgramsStatePatch']
358 getNodeArchiveHistory c nodeId = do
359 as <- runPGSQuery c query (PGS.Only nodeId) :: IO [(TableNgrams.NgramsType, NgramsTerm, NgramsPatch)]
-- Wrap each (type, term, patch) row into a one-entry NgramsStatePatch'.
360 pure $ (\(ngramsType, terms, patch) -> fst $ PM.singleton ngramsType (NgramsTablePatch $ fst $ PM.singleton terms patch)) <$> as
363 query = [sql| SELECT ngrams_type_id, terms, patch
364 FROM node_story_archive_history
365 JOIN ngrams ON ngrams.id = ngrams_id
367 ORDER BY (version, node_story_archive_history.id) DESC |]
-- | Look up an ngrams row id by its exact terms string.
369 ngramsIdQuery :: PGS.Query
370 ngramsIdQuery = [sql| SELECT id FROM ngrams WHERE terms = ? |]
-- | Persist history patches one list element at a time (recursing on the
-- tail so rows keep the given order). Each patch map is flattened to
-- (node, type, term, patch) tuples, terms are resolved to ngrams ids, and
-- rows are bulk-inserted with the supplied version.
-- NOTE(review): terms with no row in `ngrams` are silently dropped here
-- (headMay + catMaybes) — their history is lost; confirm this is intended.
373 insertNodeArchiveHistory :: PGS.Connection -> NodeId -> Version -> [NgramsStatePatch'] -> IO ()
374 insertNodeArchiveHistory _ _ _ [] = pure ()
375 insertNodeArchiveHistory c nodeId version (h:hs) = do
376 let tuples = mconcat $ (\(nType, (NgramsTablePatch patch)) ->
378 (nodeId, nType, term, p)) <$> PM.toList patch) <$> PM.toList h :: [(NodeId, TableNgrams.NgramsType, NgramsTerm, NgramsPatch)]
-- Resolve each term to its ngrams id; Nothing when the term is unknown.
379 tuplesM <- mapM (\(nId, nType, term, patch) -> do
380 ngrams <- runPGSQuery c ngramsIdQuery (PGS.Only term)
381 pure $ (\(PGS.Only termId) -> (nId, nType, termId, term, patch)) <$> (headMay ngrams)
382 ) tuples :: IO [Maybe (NodeId, TableNgrams.NgramsType, Int, NgramsTerm, NgramsPatch)]
383 _ <- runPGSExecuteMany c query $ ((\(nId, nType, termId, _term, patch) -> (nId, nType, termId, patch, version)) <$> (catMaybes tuplesM))
-- Recurse for the remaining patches so insertion order follows the list.
384 _ <- insertNodeArchiveHistory c nodeId version hs
389 query = [sql| INSERT INTO node_story_archive_history(node_id, ngrams_type_id, ngrams_id, patch, version) VALUES (?, ?, ?, ?, ?) |]
-- | Load the current state for one node from `node_stories`, folding the
-- per-(type, term) rows into a single Archive. History is NOT loaded here
-- (see 'getNodeArchiveHistory'); each row becomes a singleton-state Archive
-- and they are merged with the deep 'combineState'.
391 getNodeStory :: PGS.Connection -> NodeId -> IO NodeListStory
392 getNodeStory c nId@(NodeId nodeId) = do
393 --res <- withResource pool $ \c -> runSelect c query :: IO [NodeStoryPoly NodeId Version Int Int NgramsRepoElement]
394 res <- runPGSQuery c nodeStoriesQuery (PGS.Only nodeId) :: IO [(Version, TableNgrams.NgramsType, NgramsTerm, NgramsRepoElement)]
395 -- We have multiple rows with same node_id and different (ngrams_type_id, ngrams_id).
396 -- Need to create a map: {<node_id>: {<ngrams_type_id>: {<ngrams_id>: <data>}}}
397 let dbData = map (\(version, ngramsType, ngrams, ngrams_repo_element) ->
398 Archive { _a_version = version
400 , _a_state = Map.singleton ngramsType $ Map.singleton ngrams ngrams_repo_element }) res
401 -- NOTE When concatenating, check that the same version is for all states
402 pure $ NodeStory $ Map.singleton nId $ foldl combine mempty dbData
403 --pure $ NodeStory $ Map.fromListWith (<>) $ (\(NodeStoryDB nId a) -> (nId, a)) <$> res
405 -- NOTE (<>) for Archive doesn't concatenate states, so we have to use `combine`
-- Merge states deeply; take the version from the incoming row so the
-- initial mempty's version 0 never wins.
406 combine a1 a2 = a1 & a_state %~ combineState (a2 ^. a_state)
407 & a_version .~ (a2 ^. a_version) -- version should be updated from list, not taken from the empty Archive
-- | Rows for one node from `node_stories`, joined with `ngrams` for the
-- terms text. NOTE(review): the FROM/WHERE clauses appear elided from this
-- view (a node_id parameter is bound by the caller) — confirm.
409 nodeStoriesQuery :: PGS.Query
410 nodeStoriesQuery = [sql| SELECT version, ngrams_type_id, terms, ngrams_repo_element
412 JOIN ngrams ON ngrams.id = ngrams_id
-- | Flat-list form of the nested archive state; convenient for diffing and
-- row-per-tuple SQL statements.
415 type ArchiveStateList = [(TableNgrams.NgramsType, NgramsTerm, NgramsRepoElement)]
417 -- Functions to convert archive state (which is a Map NgramsType (Map
418 -- NgramsTerm NgramsRepoElement)) to/from a flat list
-- | Flatten: every (type, term, element) entry becomes one tuple.
419 archiveStateAsList :: NgramsState' -> ArchiveStateList
420 archiveStateAsList s = mconcat $ (\(nt, ntm) -> (\(n, nre) -> (nt, n, nre)) <$> Map.toList ntm) <$> Map.toList s
-- | Rebuild the nested map; inner singletons are merged with (<>).
422 archiveStateFromList :: ArchiveStateList -> NgramsState'
423 archiveStateFromList l = Map.fromListWith (<>) $ (\(nt, t, nre) -> (nt, Map.singleton t nre)) <$> l
425 -- | This function inserts whole new node story and archive for given node_id.
-- Flattens the archive state and inserts one `node_stories` row per
-- (type, term, element); terms not present in `ngrams` are skipped
-- (the Nothing branch of the case is elided from this view).
-- NOTE(review): the executed tuple carries 5 values (incl. a_version) but
-- the visible query has 4 columns/placeholders — lines are elided here, so
-- confirm the real query includes `version` in the full source.
426 insertNodeStory :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
427 insertNodeStory c (NodeId nId) a = do
428 _ <- mapM (\(ngramsType, ngrams, ngramsRepoElement) -> do
429 termIdM <- runPGSQuery c ngramsIdQuery (PGS.Only ngrams) :: IO [PGS.Only Int64]
430 case headMay termIdM of
432 Just (PGS.Only termId) -> runPGSExecuteMany c query [(nId, a ^. a_version, ngramsType, termId, ngramsRepoElement)]) $ archiveStateAsList $ a ^. a_state
433 -- runInsert c $ insert ngramsType ngrams ngramsRepoElement) $ archiveStateAsList _a_state
438 query = [sql| INSERT INTO node_stories(node_id, ngrams_type_id, ngrams_id, ngrams_repo_element) VALUES (?, ?, ?, ?) |]
439 -- insert ngramsType ngrams ngramsRepoElement =
440 -- Insert { iTable = nodeStoryTable
441 -- , iRows = [NodeStoryDB { node_id = sqlInt4 nId
442 -- , version = sqlInt4 _a_version
443 -- , ngrams_type_id = sqlInt4 $ TableNgrams.ngramsTypeId ngramsType
445 -- , ngrams_repo_element = sqlValueJSONB ngramsRepoElement
447 -- , iReturning = rCount
448 -- , iOnConflict = Nothing }
-- | Insert one `node_stories` row per state entry; the ngrams id is
-- resolved inside the INSERT ... SELECT, so unknown terms insert nothing.
-- NOTE(review): `_ <- mapM_ ...` is redundant (mapM_ already returns ());
-- left as-is since this is a doc-only pass.
450 insertArchiveList :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
451 insertArchiveList c nodeId a = do
452 _ <- mapM_ (\(nt, n, nre) -> runPGSExecute c query (nodeId, a ^. a_version, nt, nre, n)) (archiveStateAsList $ a ^. a_state)
453 --_ <- runPGSExecuteMany c query $ (\(nt, n, nre) -> (nodeId, a ^. a_version, nt, nre, n)) <$> (archiveStateAsList $ a ^. a_state)
457 query = [sql| INSERT INTO node_stories(node_id, version, ngrams_type_id, ngrams_id, ngrams_repo_element)
458 SELECT ?, ?, ?, ngrams.id, ? FROM ngrams WHERE terms = ? |]
-- | Delete the `node_stories` rows matching each (type, term) entry of the
-- given archive's state; the element payload of each tuple is ignored.
460 deleteArchiveList :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
461 deleteArchiveList c nodeId a = do
462 _ <- mapM_ (\(nt, n, _) -> runPGSExecute c query (nodeId, nt, n)) (archiveStateAsList $ a ^. a_state)
463 --_ <- runPGSExecuteMany c query $ (\(nt, n, _) -> (nodeId, nt, n)) <$> (archiveStateAsList $ a ^. a_state)
467 query = [sql| DELETE FROM node_stories
468 WHERE node_id = ? AND ngrams_type_id = ? AND ngrams_id IN (SELECT id FROM ngrams WHERE terms = ?) |]
-- | Overwrite `ngrams_repo_element` for each (type, term) entry of the
-- given archive's state, one UPDATE per entry.
-- NOTE(review): the archive's version is not written here — only the
-- repo element column is updated; confirm that is intended.
470 updateArchiveList :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
471 updateArchiveList c nodeId a = do
472 let params = (\(nt, n, nre) -> (nre, nodeId, nt, n)) <$> (archiveStateAsList $ a ^. a_state)
473 --q <- PGS.format c query params
474 --printDebug "[updateArchiveList] query" q
475 _ <- mapM (\p -> runPGSExecute c query p) params
479 query = [sql| UPDATE node_stories
480 SET ngrams_repo_element = ?
481 WHERE node_id = ? AND ngrams_type_id = ? AND ngrams_id IN (SELECT id FROM ngrams WHERE terms = ?) |]
483 -- | This function updates the node story and archive for given node_id.
-- Diffs current vs. new state on (type, term) keys, then applies inserts,
-- deletes, and updates in that order. Caller must already hold the
-- per-node advisory lock (see 'upsertNodeStories').
-- NOTE(review): 'updates' is computed on full triples, so it also contains
-- every freshly inserted entry — those rows are inserted and then
-- immediately re-updated to the same value; harmless but double work.
484 updateNodeStory :: PGS.Connection -> NodeId -> ArchiveList -> ArchiveList -> IO ()
485 updateNodeStory c nodeId@(NodeId _nId) currentArchive newArchive = do
488 -- 0. We assume we're inside an advisory lock
490 -- 1. Find differences (inserts/updates/deletes)
491 let currentList = archiveStateAsList $ currentArchive ^. a_state
492 let newList = archiveStateAsList $ newArchive ^. a_state
-- Key sets (type, term) determine membership; payload differences are
-- handled by the 'updates' computation below.
493 let currentSet = Set.fromList $ (\(nt, n, _) -> (nt, n)) <$> currentList
494 let newSet = Set.fromList $ (\(nt, n, _) -> (nt, n)) <$> newList
496 let inserts = filter (\(nt, n, _) -> Set.member (nt, n) $ Set.difference newSet currentSet) newList
497 --printDebug "[updateNodeStory] inserts" inserts
498 let deletes = filter (\(nt, n, _) -> Set.member (nt, n) $ Set.difference currentSet newSet) currentList
499 --printDebug "[updateNodeStory] deletes" deletes
501 -- updates are the things that are in new but not in current
502 let updates = Set.toList $ Set.difference (Set.fromList newList) (Set.fromList currentList)
503 --printDebug "[updateNodeStory] updates" $ Text.unlines $ (Text.pack . show) <$> updates
505 -- 2. Perform inserts/deletes/updates
506 printDebug "[updateNodeStory] applying insert" ()
507 insertArchiveList c nodeId $ Archive { _a_version = newArchive ^. a_version
509 , _a_state = archiveStateFromList inserts }
510 printDebug "[updateNodeStory] insert applied" ()
511 --TODO Use currentArchive ^. a_version in delete and report error
512 -- if entries with (node_id, ngrams_type_id, ngrams_id) but
513 -- different version are found.
514 deleteArchiveList c nodeId $ Archive { _a_version = newArchive ^. a_version
516 , _a_state = archiveStateFromList deletes }
517 printDebug "[updateNodeStory] delete applied" ()
518 updateArchiveList c nodeId $ Archive { _a_version = newArchive ^. a_version
520 , _a_state = archiveStateFromList updates }
521 printDebug "[updateNodeStory] update applied" ()
525 -- update = Update { uTable = nodeStoryTable
526 -- , uUpdateWith = updateEasy (\(NodeStoryDB { node_id }) ->
527 -- NodeStoryDB { archive = sqlValueJSONB $ Archive { _a_history = emptyHistory
530 -- , uWhere = (\row -> node_id row .== sqlInt4 nId)
531 -- , uReturning = rCount }
533 -- nodeStoryRemove :: Pool PGS.Connection -> NodeId -> IO Int64
534 -- nodeStoryRemove pool (NodeId nId) = withResource pool $ \c -> runDelete c delete
536 -- delete = Delete { dTable = nodeStoryTable
537 -- , dWhere = (\row -> node_id row .== sqlInt4 nId)
538 -- , dReturning = rCount }
-- | Persist one node's archive: inside a transaction, take the per-node
-- advisory xact-lock, append the pending history, then insert or update
-- the current state depending on whether the node already has one.
540 upsertNodeStories :: PGS.Connection -> NodeId -> ArchiveList -> IO ()
541 upsertNodeStories c nodeId@(NodeId nId) newArchive = do
542 printDebug "[upsertNodeStories] START nId" nId
543 PGS.withTransaction c $ do
544 printDebug "[upsertNodeStories] locking nId" nId
-- Serializes concurrent upserts for the same node; auto-released at
-- transaction end.
545 runPGSAdvisoryXactLock c nId
547 -- whether it's insert or update, we can insert node archive history already
548 -- NOTE: It is assumed that the most recent change is the first in the
549 -- list, so we save these in reverse order
550 insertNodeArchiveHistory c nodeId (newArchive ^. a_version) $ reverse $ newArchive ^. a_history
-- Re-read current state under the lock to decide insert vs. update
-- (the Nothing branch line is elided from this view).
552 (NodeStory m) <- getNodeStory c nodeId
553 case Map.lookup nodeId m of
555 _ <- insertNodeStory c nodeId newArchive
557 Just currentArchive -> do
558 _ <- updateNodeStory c nodeId currentArchive newArchive
561 printDebug "[upsertNodeStories] STOP nId" nId
-- | Upsert every node in the story map, sequentially.
563 writeNodeStories :: PGS.Connection -> NodeListStory -> IO ()
564 writeNodeStories c (NodeStory nls) = do
565 _ <- mapM (\(nId, a) -> upsertNodeStories c nId a) $ Map.toList nls
568 -- | Returns a `NodeListStory`, updating the given one for given `NodeId`
-- If the node is already present it is kept as-is (that branch is elided
-- from this view); otherwise its story is fetched from the DB and merged.
-- Map.union is left-biased, so existing in-memory entries win.
569 nodeStoryInc :: PGS.Connection -> Maybe NodeListStory -> NodeId -> IO NodeListStory
570 nodeStoryInc c Nothing nId = getNodeStory c nId
571 nodeStoryInc c (Just ns@(NodeStory nls)) nId = do
572 case Map.lookup nId nls of
574 (NodeStory nls') <- getNodeStory c nId
575 pure $ NodeStory $ Map.union nls nls'
-- | Fold 'nodeStoryInc' over many node ids, seeding from the DB when no
-- story is supplied.
578 nodeStoryIncs :: PGS.Connection -> Maybe NodeListStory -> [NodeId] -> IO NodeListStory
579 nodeStoryIncs _ Nothing [] = pure $ NodeStory $ Map.empty
580 nodeStoryIncs c (Just nls) ns = foldM (\m n -> nodeStoryInc c (Just m) n) nls ns
-- Seed with the first node's story, then continue with the accumulator.
581 nodeStoryIncs c Nothing (ni:ns) = do
582 m <- getNodeStory c ni
583 nodeStoryIncs c (Just m) ns
585 -- nodeStoryDec :: Pool PGS.Connection -> NodeListStory -> NodeId -> IO NodeListStory
586 -- nodeStoryDec pool ns@(NodeStory nls) ni = do
587 -- case Map.lookup ni nls of
589 -- _ <- nodeStoryRemove pool ni
592 -- let ns' = Map.filterWithKey (\k _v -> k /= ni) nls
593 -- _ <- nodeStoryRemove pool ni
594 -- pure $ NodeStory ns'
595 ------------------------------------
-- | Build the 'NodeStoryEnv': an empty shared MVar, a debounced saver,
-- an immediate saver, and a getter bound to the same MVar. Both savers
-- write the whole story to the DB and then clear the in-memory history
-- (history is already persisted; see 'clearHistory').
597 readNodeStoryEnv :: Pool PGS.Connection -> IO NodeStoryEnv
598 readNodeStoryEnv pool = do
599 mvar <- nodeStoryVar pool Nothing []
600 saver <- mkNodeStorySaver pool mvar
-- Immediate save: same body as the debounced action, run synchronously.
601 let saver_immediate = modifyMVar_ mvar $ \ns -> do
602 withResource pool $ \c -> do
603 --printDebug "[mkNodeStorySaver] will call writeNodeStories, ns" ns
604 writeNodeStories c ns
605 pure $ clearHistory ns
606 -- let saver = modifyMVar_ mvar $ \mv -> do
607 -- writeNodeStories pool mv
608 -- printDebug "[readNodeStoryEnv] saver" mv
609 -- let mv' = clearHistory mv
610 -- printDebug "[readNodeStoryEnv] saver, cleared" mv'
-- NOTE(review): the _nse_saver = saver field appears elided from this view.
612 pure $ NodeStoryEnv { _nse_var = mvar
614 , _nse_saver_immediate = saver_immediate
615 , _nse_getter = nodeStoryVar pool (Just mvar)
-- | Without an existing MVar: load the given nodes from the DB into a new
-- MVar (the newMVar line is elided from this view). With one: refresh it
-- in place with any missing nodes and return the same MVar.
618 nodeStoryVar :: Pool PGS.Connection
619 -> Maybe (MVar NodeListStory)
621 -> IO (MVar NodeListStory)
622 nodeStoryVar pool Nothing nIds = do
623 state <- withResource pool $ \c -> nodeStoryIncs c Nothing nIds
625 nodeStoryVar pool (Just mv) nIds = do
626 _ <- withResource pool
627 $ \c -> modifyMVar_ mv
628 $ \nsl -> (nodeStoryIncs c (Just nsl) nIds)
631 -- Debounce is useful since it could delay the saving to some later
632 -- time, asynchronously and we keep operating on memory only.
-- | Build the debounced saver: at most once per minute, write the whole
-- story to the DB and clear the in-memory history.
-- NOTE(review): the `minute` binding referenced by debounceFreq is elided
-- from this view (only `second` is visible) — confirm in the full source.
633 mkNodeStorySaver :: Pool PGS.Connection -> MVar NodeListStory -> IO (IO ())
634 mkNodeStorySaver pool mvns = mkDebounce settings
636 settings = defaultDebounceSettings
637 { debounceAction = do
638 -- NOTE: Lock MVar first, then use resource pool.
639 -- Otherwise we could wait for MVar, while
640 -- blocking the pool connection.
641 modifyMVar_ mvns $ \ns -> do
642 withResource pool $ \c -> do
643 --printDebug "[mkNodeStorySaver] will call writeNodeStories, ns" ns
644 writeNodeStories c ns
645 pure $ clearHistory ns
646 --withMVar mvns (\ns -> printDebug "[mkNodeStorySaver] debounce nodestory" ns)
647 , debounceFreq = 1*minute
650 second = 10^(6 :: Int)
-- | Drop the in-memory history of every archive (called after the history
-- has been persisted to `node_story_archive_history`).
652 clearHistory :: NodeListStory -> NodeListStory
653 clearHistory (NodeStory ns) = NodeStory $ ns & (traverse . a_history) .~ emptyHistory
655 emptyHistory = [] :: [NgramsStatePatch']
-- | Current version of a list's archive, read straight from the DB.
-- NOTE(review): the _Just traversal means a missing list yields the
-- monoid default (0) rather than an error — confirm callers expect that.
657 currentVersion :: (HasNodeStory env err m) => ListId -> m Version
658 currentVersion listId = do
659 pool <- view connPool
660 nls <- withResource pool $ \c -> liftBase $ getNodeStory c listId
661 pure $ nls ^. unNodeStory . at listId . _Just . a_version
664 -- mkNodeStorySaver :: MVar NodeListStory -> Cmd err (Cmd err ())
665 -- mkNodeStorySaver mvns = mkDebounce settings
667 -- settings = defaultDebounceSettings
668 -- { debounceAction = withMVar mvns (\ns -> writeNodeStories ns)
669 -- , debounceFreq = 1 * minute
670 -- -- , debounceEdge = trailingEdge -- Trigger on the trailing edge
672 -- minute = 60 * second
673 -- second = 10^(6 :: Int)
676 -----------------------------------------